diff --git a/0.8.0/authorino/docs/user-guides/mtls-authentication/index.html b/0.8.0/authorino/docs/user-guides/mtls-authentication/index.html index 47b529be..dc0ddcad 100644 --- a/0.8.0/authorino/docs/user-guides/mtls-authentication/index.html +++ b/0.8.0/authorino/docs/user-guides/mtls-authentication/index.html @@ -4182,9 +4182,9 @@
Create a CA (Certificate Authority) certificate to issue the client certificates that will be used to authenticate clients that send requests to the Talker API:
-openssl req -x509 -sha256 -nodes \
+openssl req -x509 -sha512 -nodes \
-days 365 \
- -newkey rsa:2048 \
+ -newkey rsa:4096 \
-subj "/CN=talker-api-ca" \
-addext basicConstraints=CA:TRUE \
-addext keyUsage=digitalSignature,keyCertSign \
@@ -4220,7 +4220,7 @@ ❺ Setup Envoy - address:
socket_address:
address: 0.0.0.0
- port_value: 8000
+ port_value: 8443
filter_chains:
- transport_socket:
name: envoy.transport_sockets.tls
@@ -4328,7 +4328,7 @@ ❺ Setup Envoy image: envoyproxy/envoy:v1.19-latest
name: envoy
ports:
- - containerPort: 8000
+ - containerPort: 8443
name: web
- containerPort: 8001
name: admin
@@ -4368,7 +4368,7 @@ ❺ Setup Envoy app: envoy
ports:
- name: web
- port: 8000
+ port: 8443
protocol: TCP
---
apiVersion: networking.k8s.io/v1
@@ -4383,13 +4383,13 @@ ❺ Setup Envoy - backend:
service:
name: envoy
- port: { number: 8000 }
+ port: { number: 8443 }
path: /
pathType: Prefix
EOF
-The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
-kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &
+The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8443 to the Envoy service running inside the cluster:
+kubectl port-forward deployment/envoy 8443:8443 2>&1 >/dev/null &
❻ Create the AuthConfig
¶
Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
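For reference, such an AuthConfig looks roughly like the sketch below (assuming Authorino's authorino.kuadrant.io/v1beta2 API; the names, label selector and pattern values are illustrative, not necessarily the guide's exact manifest):
apiVersion: authorino.kuadrant.io/v1beta2
kind: AuthConfig
metadata:
  name: talker-api-protection
spec:
  hosts:
  - talker-api.127.0.0.1.nip.io
  authentication:
    "mtls":
      x509:
        selector:            # label selector for the Secrets holding the trusted CA certs
          matchLabels:
            app: talker-api
  authorization:
    "acme":
      patternMatching:
        patterns:
        - selector: auth.identity.Organization
          operator: incl
          value: ACME Inc.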
@@ -4431,43 +4431,43 @@ ❻ Create the AuthConfig
❼ Consume the API¶
With a TLS certificate signed by the trusted CA:
-openssl genrsa -out /tmp/aisha.key 2048
+openssl genrsa -out /tmp/aisha.key 4096
openssl req -new -subj "/CN=aisha/C=PK/L=Islamabad/O=ACME Inc./OU=Engineering" -key /tmp/aisha.key -out /tmp/aisha.csr
-openssl x509 -req -sha256 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt
+openssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt
-curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8000 -i
+curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i
# HTTP/1.1 200 OK
With a TLS certificate signed by the trusted CA, though missing an authorized Organization:
-openssl genrsa -out /tmp/john.key 2048
+openssl genrsa -out /tmp/john.key 4096
openssl req -new -subj "/CN=john/C=UK/L=London" -key /tmp/john.key -out /tmp/john.csr
-openssl x509 -req -sha256 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt
+openssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt
-curl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8000 -i
+curl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8443 -i
# HTTP/1.1 403 Forbidden
# x-ext-auth-reason: Unauthorized
❽ Try the AuthConfig via raw HTTP authorization interface¶
Expose Authorino's raw HTTP authorization to the local host:
-kubectl port-forward service/authorino-authorino-authorization 5001:5001 &
+kubectl port-forward service/authorino-authorino-authorization 5001:5001 &
With a TLS certificate signed by the trusted CA:
curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i
# HTTP/2 200
With a TLS certificate signed by an unknown authority:
-openssl req -x509 -sha256 -nodes \
+openssl req -x509 -sha512 -nodes \
-days 365 \
- -newkey rsa:2048 \
+ -newkey rsa:4096 \
-subj "/CN=untrusted" \
-addext basicConstraints=CA:TRUE \
-addext keyUsage=digitalSignature,keyCertSign \
-keyout /tmp/untrusted-ca.key \
-out /tmp/untrusted-ca.crt
-openssl genrsa -out /tmp/niko.key 2048
+openssl genrsa -out /tmp/niko.key 4096
openssl req -new -subj "/CN=niko/C=JP/L=Osaka" -key /tmp/niko.key -out /tmp/niko.csr
-openssl x509 -req -sha256 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt
+openssl x509 -req -sha512 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt
curl -k --cert /tmp/niko.crt --key /tmp/niko.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i
# HTTP/2 401
@@ -4479,7 +4479,7 @@ ❾ Revoke an entire chain of ce
Even if the deleted root certificate is still cached and accepted at the gateway, Authorino will immediately revoke access at the application level.
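(Revoking the chain here amounts to deleting the Kubernetes Secret that stores the trusted CA certificate, e.g. kubectl delete secret/talker-api-ca, assuming the secret name used earlier in this guide.)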
Try with a previously accepted certificate:
-curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8000 -i
+curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i
# HTTP/1.1 401 Unauthorized
# www-authenticate: Basic realm="mtls"
# x-ext-auth-reason: x509: certificate signed by unknown authority
diff --git a/0.8.0/kuadrant-operator/doc/auth/index.html b/0.8.0/kuadrant-operator/doc/auth/index.html
index e445cedc..0856b229 100644
--- a/0.8.0/kuadrant-operator/doc/auth/index.html
+++ b/0.8.0/kuadrant-operator/doc/auth/index.html
@@ -4610,9 +4610,9 @@ Route selectorswhen
conditions¶
when
conditions can be used to scope an AuthPolicy or auth rule within an AuthPolicy (i.e. to filter the traffic to which a policy or policy rule applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules via routeSelectors
.
Use when
conditions to conditionally activate policies and policy rules based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames
and spec.rules.matches
fields, or in general in AuthPolicies that target a Gateway.
-when
conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.
+when
conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.
The selectors within the when
conditions of an AuthPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.
-Authorino JSON path string modifiers can also be applied to the selectors within the when
conditions of an AuthPolicy.
+Authorino JSON path string modifiers can also be applied to the selectors within the when
conditions of an AuthPolicy.
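As an illustration, a when block combining two conditions, the second one applying a JSON path string modifier, might look like the following sketch (the selectors come from the Well-known Attributes; the values are hypothetical):
when:
- selector: request.method
  operator: eq
  value: GET
- selector: auth.identity.email.@case:lower
  operator: neq
  value: admin@example.com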
Examples¶
Check out the following user guides for examples of protecting services with Kuadrant:
@@ -4694,7 +4694,7 @@ Limitati
Once again, requests to app.io/foo
will be protected under AuthPolicy policy-1
, while requests to app.io/bar
will not be protected under any policy at all, even though gateway policy policy-2
is expected to be enforced as default. Both policies will report status condition as Enforced
nonetheless.
To avoid these problems, use different hostnames in each route.
Implementation details¶
-Under the hood, for each AuthPolicy, Kuadrant creates an Istio AuthorizationPolicy
and an Authorino AuthConfig
custom resources.
+Under the hood, for each AuthPolicy, Kuadrant creates one Istio AuthorizationPolicy
and one Authorino AuthConfig
custom resource.
Only requests that match the rules in the Istio AuthorizationPolicy
cause an authorization request to be sent to the external authorization service ("Authorino"), i.e., only requests directed to the HTTPRouteRules targeted by the AuthPolicy (directly or indirectly), according to the declared top-level route selectors (if present), or all requests for which a matching HTTPRouteRule exists (otherwise).
Authorino looks up the auth scheme (AuthConfig
custom resource) to enforce using the provided hostname of the original request as key. It then checks again if the request matches at least one of the selected HTTPRouteRules, in which case it enforces the auth scheme.
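For illustration, the generated Istio resource is along these lines (a sketch only; the resource name, hostname and the external authorizer name kuadrant-authorization are assumptions, not guaranteed to match Kuadrant's exact output):
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: on-my-gateway
spec:
  action: CUSTOM
  provider:
    name: kuadrant-authorization   # ext-authz provider assumed to be registered in the mesh config
  rules:
  - to:
    - operation:
        hosts: ["api.toystore.com"]   # hostnames taken from the targeted HTTPRoute
        paths: ["/toys*"]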
diff --git a/0.8.0/kuadrant-operator/doc/install/install-openshift/index.html b/0.8.0/kuadrant-operator/doc/install/install-openshift/index.html
index 5e242052..653b4eb4 100644
--- a/0.8.0/kuadrant-operator/doc/install/install-openshift/index.html
+++ b/0.8.0/kuadrant-operator/doc/install/install-openshift/index.html
@@ -4291,7 +4291,7 @@ Step 4 - Optional:
- OpenShift remote write configuration
- Kube Thanos
-The example dashboards and alerts for observing Kuadrant functionality use low-level CPU metrics and network metrics available from the user monitoring stack in OpenShift. They also use resource state metrics from Gateway API and Kuadrant resources.
+The example dashboards and alerts for observing Kuadrant functionality use low-level CPU metrics and network metrics available from the user monitoring stack in OpenShift. They also use resource state metrics from Gateway API and Kuadrant resources.
To scrape these additional metrics, you can install a kube-state-metrics instance
, with a custom resource configuration as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/kube-state-metrics.yaml
kubectl apply -k https://github.com/Kuadrant/gateway-api-state-metrics?ref=main
@@ -4299,7 +4299,7 @@ Step 4 - Optional:
To enable request metrics in Istio, you must create a telemetry
resource as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/telemetry.yaml
-If you have Grafana installed in your cluster, you can import the example dashboards and alerts.
+If you have Grafana installed in your cluster, you can import the example dashboards and alerts.
For example installation details, see installing Grafana on OpenShift. When installed, you must add your Thanos instance as a data source to Grafana. Alternatively, if you are using only the user workload monitoring stack in your OpenShift cluster, and not writing metrics to an external Thanos instance, you can set up a data source to the thanos-querier route in the OpenShift cluster.
Step 5 - Create secrets for your credentials¶
Before installing the Kuadrant Operator, you must enter the following commands to set up secrets that you will use later:
diff --git a/0.8.0/kuadrant-operator/doc/observability/examples/index.html b/0.8.0/kuadrant-operator/doc/observability/examples/index.html
index 5bfb703c..f89c1f5a 100644
--- a/0.8.0/kuadrant-operator/doc/observability/examples/index.html
+++ b/0.8.0/kuadrant-operator/doc/observability/examples/index.html
@@ -4042,11 +4042,11 @@ Importing Dashboards into Grafana
ConfigMap Method: Automate dashboard provisioning by adding files to a ConfigMap, which should be mounted at /etc/grafana/provisioning/dashboards
.
-Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.
+Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.
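For example, a minimal dashboard provider file that such a ConfigMap could carry (the provider and folder names are hypothetical):
apiVersion: 1
providers:
- name: kuadrant-dashboards   # hypothetical provider name
  folder: Kuadrant
  type: file
  options:
    path: /etc/grafana/provisioning/dashboards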
Alerts¶
Setting Up Alerts in Prometheus¶
Integrate alerts into Prometheus using a PrometheusRule
resource. Adjust alert thresholds to meet your specific operational needs.
-Further information on the metrics used for these alerts can be found on the metrics page.
+Further information on the metrics used for these alerts can be found on the metrics page.
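A minimal sketch of such a PrometheusRule (the alert expression and threshold are placeholders to adapt to your environment):
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: kuadrant-alerts
spec:
  groups:
  - name: gateway-availability
    rules:
    - alert: High5xxRate
      expr: sum(rate(istio_requests_total{response_code=~"5.."}[5m])) > 1   # placeholder threshold
      for: 5m
      labels:
        severity: warning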
diff --git a/0.8.0/kuadrant-operator/doc/observability/tracing/index.html b/0.8.0/kuadrant-operator/doc/observability/tracing/index.html
index 0af5f301..b3e1ffe5 100644
--- a/0.8.0/kuadrant-operator/doc/observability/tracing/index.html
+++ b/0.8.0/kuadrant-operator/doc/observability/tracing/index.html
@@ -4123,7 +4123,7 @@ Troubleshooting Flow Using T
If you centrally aggregate logs using something like promtail and loki, you can jump between trace information and the relevant logs for that service:
Using a combination of tracing and logs, you can visualise and troubleshoot request timing issues and drill down to specific services.
-This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users traffic.
+This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users' traffic.
diff --git a/0.8.0/kuadrant-operator/doc/reference/authpolicy/index.html b/0.8.0/kuadrant-operator/doc/reference/authpolicy/index.html
index 62b8e155..e332f8d8 100644
--- a/0.8.0/kuadrant-operator/doc/reference/authpolicy/index.html
+++ b/0.8.0/kuadrant-operator/doc/reference/authpolicy/index.html
@@ -4324,7 +4324,7 @@ AuthPolicySpecPatternExpressionOrRef
+[]PatternExpressionOrRef
No
List of implicit default additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway.
@@ -4373,7 +4373,7 @@ AuthPolicyCommonSpecPatternExpressionOrRef
+[]PatternExpressionOrRef
No
List of additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway.
@@ -4441,13 +4441,13 @@ AuthRuleCommonPatternExpressionOrRef
+[]PatternExpressionOrRef
No
List of additional dynamic conditions (expressions) to activate the auth rule. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway.
cache
-Caching spec
+Caching spec
No
Caching options for the resolved object returned when applying this auth rule. (Default: disabled)
@@ -4478,61 +4478,61 @@ AuthenticationRuleAPI Key authentication spec
+API Key authentication spec
No
Authentication based on API keys stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
kubernetesTokenReview
-KubernetesTokenReview spec
+KubernetesTokenReview spec
No
Authentication by Kubernetes token review. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
jwt
-JWT verification spec
+JWT verification spec
No
Authentication based on JSON Web Tokens (JWT). Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
oauth2Introspection
-OAuth2 Token Introscpection spec
+OAuth2 Token Introspection spec
No
Authentication by OAuth2 token introspection. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
x509
-X.509 authentication spec
+X.509 authentication spec
No
Authentication based on client X.509 certificates. The certificates presented by the clients must be signed by a trusted CA whose certificates are stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
plain
-Plain identity object spec
+Plain identity object spec
No
Identity object extracted from the context. Use this method when authentication is performed beforehand by a proxy and the resulting object passed to Authorino as JSON in the auth request. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
anonymous
-Anonymous access
+Anonymous access
No
Anonymous access. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
.
credentials
-Auth credentials spec
+Auth credentials spec
No
Customizations to where credentials are required to be passed in the request for authentication based on this auth rule. Defaults to HTTP Authorization header with prefix "Bearer".
overrides
-Identity extension spec
+Identity extension spec
No
JSON overrides to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc).
defaults
-Identity extension spec
+Identity extension spec
No
JSON defaults to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc).
@@ -4557,19 +4557,19 @@ MetadataRule
http
-HTTP GET/GET-by-POST external metadata spec
+HTTP GET/GET-by-POST external metadata spec
No
External source of auth metadata via HTTP request. Use one of: http
, userInfo
, uma
.
userInfo
-OIDC UserInfo spec
+OIDC UserInfo spec
No
OpenID Connect UserInfo linked to an OIDC authentication rule declared in this same AuthPolicy. Use one of: http
, userInfo
, uma
.
uma
-UMA metadata spec
+UMA metadata spec
No
User-Managed Access (UMA) source of resource data. Use one of: http
, userInfo
, uma
.
@@ -4594,25 +4594,25 @@ AuthorizationRulePattern-matching authorization spec
+Pattern-matching authorization spec
No
Pattern-matching authorization rules. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
.
opa
-OPA authorization spec
+OPA authorization spec
No
Open Policy Agent (OPA) Rego policy. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
.
kubernetesSubjectAccessReview
-Kubernetes SubjectAccessReview spec
+Kubernetes SubjectAccessReview spec
No
Authorization by Kubernetes SubjectAccessReview. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
.
spicedb
-SpiceDB authorization spec
+SpiceDB authorization spec
No
Authorization decision delegated to external Authzed/SpiceDB server. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
.
@@ -4637,13 +4637,13 @@ ResponseSpec
unauthenticated
-Custom denial status spec
+Custom denial status spec
No
Customizations on the denial status and other HTTP attributes when the request is unauthenticated. (Default: 401 Unauthorized
)
unauthorized
-Custom denial status spec
+Custom denial status spec
No
Customizations on the denial status and other HTTP attributes when the request is unauthorized. (Default: 403 Forbidden
)
@@ -4693,19 +4693,19 @@ SuccessResponseItemPlain text response item
+Plain text response item
No
Plain text content. Use one of: plain
, json
, wristband
.
json
-JSON injection response item
+JSON injection response item
No
Specification of a JSON object. Use one of: plain
, json
, wristband
.
wristband
-Festival Wristband token response item
+Festival Wristband token response item
No
Specification of a Festival Wristband token. Use one of: plain
, json
, wristband
.
@@ -4730,7 +4730,7 @@ CallbackRule
http
-HTTP endpoints callback spec
+HTTP endpoints callback spec
No
HTTP endpoint settings to build the callback request (webhook).
diff --git a/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/index.html b/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/index.html
index 51a832d4..ff63468a 100644
--- a/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/index.html
+++ b/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/index.html
@@ -4493,7 +4493,7 @@ ⑤ Gr
Yes, you can.
The example above is for non-resource URL Kubernetes roles. For using Roles
and RoleBindings
instead of
ClusterRoles
and ClusterRoleBindings
(and thus more flexible, resource-based permissions to protect the API),
- see the spec for Kubernetes SubjectAccessReview authorization
+ see the spec for Kubernetes SubjectAccessReview authorization
in the Authorino docs.
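As a rough sketch of that spec (assuming Authorino's v1beta2 API; the namespace, group, resource and verb values are hypothetical), a resource-based rule could look like:
authorization:
  "k8s-rbac-resources":
    kubernetesSubjectAccessReview:
      user:
        selector: auth.identity.user.username
      resourceAttributes:
        namespace:
          value: default
        group:
          value: apps
        resource:
          value: deployments
        verb:
          value: get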
Try the API with permission¶
diff --git a/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/index.html b/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/index.html
index 7a9ff25b..e33bdbd1 100644
--- a/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/index.html
+++ b/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/index.html
@@ -4971,7 +4971,7 @@ Step 2 - Set up HTTPRoute and backe
Step 3 - Use OAS to define your HTTPRoute rules¶
You can generate Kuadrant and Gateway API resources directly from OAS documents by using an x-kuadrant
extension.
-NOTE: For a more in-depth look at the OAS extension, see the kuadrantctl documentation.
+NOTE: For a more in-depth look at the OAS extension, see the kuadrantctl documentation.
You will use kuadrantctl
to generate your HTTPRoute
.
NOTE: The sample OAS has some placeholders for namespaces and domains. You will inject valid values into these placeholders based on your previous environment variables.
Generate the resource from your OAS as follows (envsubst
will replace the placeholders):
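Assuming the sample OAS is saved locally, the generation step looks something like this (the file name is a placeholder):
cat sample-oas.yaml | envsubst | kuadrantctl generate gatewayapi httproute --oas - | kubectl apply -f -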
@@ -5037,7 +5037,7 @@ Step 5 - Set up API key auth flowOptional: Step 6 - Set up OpenID Connect auth flow (skip if using API key only)¶
This section of the walkthrough uses the kuadrantctl
tool to create an AuthPolicy
that integrates with an OpenID provider and a RateLimitPolicy
that leverages JWT values for per-user rate limiting. It is important to note that OpenID requires an external provider. Therefore, you should adapt the following example to suit your specific needs and provider.
The platform engineer workflow established default policies for authentication and rate limiting at your Gateway. The new developer-defined policies, which you will create, are intended to target your HTTPRoute and will supersede the existing policies for requests to your API endpoints, similar to your previous API key example.
-The example OAS uses Kuadrant-based extensions. These extensions enable you to define routing and service protection requirements. For more details, see OpenAPI Kuadrant extensions.
+The example OAS uses Kuadrant-based extensions. These extensions enable you to define routing and service protection requirements. For more details, see OpenAPI Kuadrant extensions.
Prerequisites¶
- You have installed and configured an OpenID Connect provider, such as https://www.keycloak.org/.
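With that in place, generating and applying the AuthPolicy from the OAS looks something like this (the file name is a placeholder):
cat sample-oas.yaml | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | kubectl apply -f -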
@@ -5078,7 +5078,7 @@ Test your OpenID AuthPolicyStep 7 - Set up rate limiting¶
-
Lastly, you can generate your RateLimitPolicy
to add your rate limits, based on your OAS file. Rate limiting is simplified for this walkthrough and is based on either the bearer token or the API key value. There are more advanced examples in the How-to guides on the Kuadrant documentation site, for example: Authenticated rate limiting with JWTs and Kubernetes RBAC.
+Lastly, you can generate your RateLimitPolicy
to add your rate limits, based on your OAS file. Rate limiting is simplified for this walkthrough and is based on either the bearer token or the API key value. There are more advanced examples in the How-to guides on the Kuadrant documentation site, for example: Authenticated rate limiting with JWTs and Kubernetes RBAC.
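When you have that OAS ready, generating and applying the RateLimitPolicy looks something like this (the file name is a placeholder):
cat sample-oas.yaml | envsubst | kuadrantctl generate kuadrant ratelimitpolicy --oas - | kubectl apply -f -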
You can continue to use this sample OAS document, which includes both authentication and a rate limit:
diff --git a/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect/index.html b/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect/index.html
index 32e84e12..1165fa05 100644
--- a/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect/index.html
+++ b/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect/index.html
@@ -4103,7 +4103,7 @@
Secure, protect, and connect services with Kuadrant on Kubernetes¶
Prerequisites¶
-- You have completed the Single-cluster Quick Start or Multi-cluster Quick Start.
+- You have completed the Single-cluster Quick Start or Multi-cluster Quick Start.
Overview¶
In this guide, we will cover the different policies from Kuadrant and how you can use them to secure, protect and connect an Istio-controlled gateway in a single cluster, and how you can set more refined protection on the HTTPRoutes exposed by that gateway.
diff --git a/0.8.0/kuadrant-operator/index.html b/0.8.0/kuadrant-operator/index.html
index 2b944a56..e1246c89 100644
--- a/0.8.0/kuadrant-operator/index.html
+++ b/0.8.0/kuadrant-operator/index.html
@@ -4382,7 +4382,7 @@ Provided APIsLimitador CR
-Authorino CRD
+Authorino CRD
Authorino Operator
Represents an instance of Authorino
Authorino CR
diff --git a/0.8.0/search/search_index.json b/0.8.0/search/search_index.json
index 991b7287..ada8fc7c 100644
--- a/0.8.0/search/search_index.json
+++ b/0.8.0/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"Kuadrant combines Gateway API and Istio-based gateway controllers to enhance application connectivity. It enables platform engineers and application developers to easily connect, secure, and protect their services and infrastructure across multiple clusters with policies for TLS, DNS, application authentication & authorization, and rate limiting. Additionally, Kuadrant offers observability templates to further support infrastructure management.
"},{"location":"#getting-started","title":"Getting Started","text":"For a quick local setup of Kuadrant, see our Single Cluster or Multi Cluster guides. Explore the single and multi-cluster architecture in our Architectural Overview.
"},{"location":"getting-started-multi-cluster-ocm/","title":"Getting Started with OCM","text":""},{"location":"getting-started-multi-cluster-ocm/#kuadrant-getting-started-multi-cluster","title":"Kuadrant Getting Started - Multi Cluster","text":""},{"location":"getting-started-multi-cluster-ocm/#prerequisites","title":"Prerequisites","text":" - Docker
- Kind
- Kubectl
- OpenSSL >= 3
- AWS account with Route 53 enabled or GCP with Cloud DNS enabled
- Docker Mac Net Connect (macOS users only)
"},{"location":"getting-started-multi-cluster-ocm/#dns-environmental-variables","title":"DNS Environmental Variables","text":"Export environment variables with the keys listed below for your desired provider. Fill in your own values as appropriate. Note that you will need to have created a root domain in AWS Route 53 or in GCP Cloud DNS:
"},{"location":"getting-started-multi-cluster-ocm/#aws","title":"AWS","text":"Env Var Example Value Description MGC_ZONE_ROOT_DOMAIN
jbloggs.hcpapps.net
Hostname for the root Domain MGC_AWS_DNS_PUBLIC_ZONE_ID
Z01234567US0IQE3YLO00
AWS Route 53 Zone ID for specified MGC_ZONE_ROOT_DOMAIN
MGC_AWS_ACCESS_KEY_ID
AKIA1234567890000000
Access Key ID, for user with permissions to Route 53 in the account where root domain is created MGC_AWS_SECRET_ACCESS_KEY
Z01234567US0000000
Access Secret Access Key, for user with permissions to Route 53 in the account where root domain is created MGC_AWS_REGION
eu-west-1
AWS Region"},{"location":"getting-started-multi-cluster-ocm/#gcp","title":"GCP","text":"Env Var Example Value Description GOOGLE
{\"client_id\": \"00000000-00000000000000.apps.googleusercontent.com\",\"client_secret\": \"d-FL95Q00000000000000\",\"refresh_token\": \"00000aaaaa00000000-AAAAAAAAAAAAKFGJFJDFKDK\",\"type\": \"authorized_user\"}
This is the JSON created from either the JSON credentials created by the Google Cloud CLI or a Service account PROJECT_ID
my_project_id
ID to the google project ZONE_NAME
jbloggs-google
Zone name ZONE_DNS_NAME
jbloggs.google.hcpapps.net
DNS name LOG_LEVEL
1
Log level for the Controller Alternatively, to set defaults, add the above environment variables to your .zshrc
or .bash_profile
.
"},{"location":"getting-started-multi-cluster-ocm/#set-the-release-you-want-to-use","title":"Set the release you want to use","text":"export MGC_BRANCH=release-0.3\n
"},{"location":"getting-started-multi-cluster-ocm/#set-up-clusters-and-install-kuadrant","title":"Set Up Clusters and install Kuadrant","text":"Run the following:
curl \"https://raw.githubusercontent.com/kuadrant/multicluster-gateway-controller/${MGC_BRANCH}/hack/quickstart-setup.sh\" | bash\n
"},{"location":"getting-started-multi-cluster-ocm/#whats-next","title":"What's Next","text":"Now that you have two Kind clusters configured with Kuadrant installed you are ready to begin the Multicluster Gateways walkthrough.
"},{"location":"getting-started-multi-cluster/","title":"Multi-Cluster","text":""},{"location":"getting-started-multi-cluster/#kuadrant-getting-started-multi-cluster","title":"Kuadrant Getting Started - Multi Cluster","text":""},{"location":"getting-started-multi-cluster/#overview","title":"Overview","text":"In this quick start, we will cover the setup of Kuadrant in multiple local kind clusters. This document is intended as a follow on to the single cluster guide. It can be used for adding 1 or more clusters to your local setup.
"},{"location":"getting-started-multi-cluster/#prerequisites","title":"Prerequisites","text":" - Completed the Single-cluster Quick Start
"},{"location":"getting-started-multi-cluster/#environmental-variables","title":"Environmental Variables","text":"The same environment variable requirements from the Single-cluster Quick Start apply to this document, including the KUADRANT_REF
variable.
"},{"location":"getting-started-multi-cluster/#set-up-a-kind-cluster-and-install-kuadrant","title":"Set Up a kind cluster and install Kuadrant","text":"Run the same quickstart script from the single cluster quick start:
curl \"https://raw.githubusercontent.com/kuadrant/kuadrant-operator/${KUADRANT_REF}/hack/quickstart-setup.sh\" | bash\n
The script will detect if you already have a cluster from the single cluster setup running, and prompt you for a multi cluster setup. This will setup an additional kind cluster, install Istio and install Kuadrant. You can re-run the script multiple times to add more clusters. Each cluster will have a number suffix in the name. For example: kuadrant-local-1
, kuadrant-local-2
, kuadrant-local-3
. The original cluster from the single cluster setup will keep its name of just kuadrant-local
.
"},{"location":"getting-started-multi-cluster/#clean-up","title":"Clean Up","text":"To ensure that any DNS records are removed, you should remove any DNSPolicy
and TLSPolicy
resources before deleting the local cluster.
"},{"location":"getting-started-multi-cluster/#whats-next","title":"What's Next","text":"The next step is to setup and use the policies provided by Kuadrant.
Secure, Protect and Connect your Gateway
"},{"location":"getting-started-single-cluster/","title":"Single-Cluster","text":""},{"location":"getting-started-single-cluster/#kuadrant-getting-started-single-cluster","title":"Kuadrant Getting Started - Single Cluster","text":""},{"location":"getting-started-single-cluster/#overview","title":"Overview","text":"In this quick start, we will cover:
- setup of Kuadrant in a singe local kind cluster
"},{"location":"getting-started-single-cluster/#prerequisites","title":"Prerequisites","text":" - Docker
- Kind
- Kubectl
- OpenSSL >= 3
- AWS account with Route 53 enabled or GCP with Cloud DNS enabled
- Docker Mac Net Connect (macOS users only)
"},{"location":"getting-started-single-cluster/#environmental-variables","title":"Environmental Variables","text":""},{"location":"getting-started-single-cluster/#general","title":"General","text":"Env Var Example Value Description ISTIO_INSTALL_SAIL
true
Whether to install istio through project sail, default false
If you want to make use of the Kuadrant DNSPolicy
you should setup the following environmental variables depending on your DNS Provider:
"},{"location":"getting-started-single-cluster/#aws","title":"AWS","text":"Env Var Example Value Description KUADRANT_ZONE_ROOT_DOMAIN
jbloggs.hcpapps.net
Hostname for the root Domain KUADRANT_AWS_DNS_PUBLIC_ZONE_ID
Z01234567US0IQE3YLO00
AWS Route 53 Zone ID for specified KUADRANT_ZONE_ROOT_DOMAIN
KUADRANT_AWS_ACCESS_KEY_ID
AKIA1234567890000000
Access Key ID, for user with permissions to Route 53 in the account where root domain is created KUADRANT_AWS_SECRET_ACCESS_KEY
Z01234567US0000000
Access Secret Access Key, for user with permissions to Route 53 in the account where root domain is created KUADRANT_AWS_REGION
eu-west-1
AWS Region"},{"location":"getting-started-single-cluster/#gcp","title":"GCP","text":"Env Var Example Value Description GOOGLE
{\"client_id\": \"00000000-00000000000000.apps.googleusercontent.com\",\"client_secret\": \"d-FL95Q00000000000000\",\"refresh_token\": \"00000aaaaa00000000-AAAAAAAAAAAAKFGJFJDFKDK\",\"type\": \"authorized_user\"}
This is the JSON created from either the JSON credentials created by the Google Cloud CLI or a Service account PROJECT_ID
my_project_id
ID to the google project ZONE_NAME
jbloggs-google
Zone name ZONE_DNS_NAME
jbloggs.google.hcpapps.net
DNS name LOG_LEVEL
1
Log level for the Controller Alternatively, to set defaults, add the above environment variables to your .zshrc
or .bash_profile
.
"},{"location":"getting-started-single-cluster/#set-the-release-you-want-to-use","title":"Set the release you want to use","text":"export KUADRANT_REF=v0.8.0\nexport ISTIO_INSTALL_SAIL=true\n
"},{"location":"getting-started-single-cluster/#set-up-a-kind-cluster-and-install-kuadrant","title":"Set Up a kind cluster and install Kuadrant","text":"Run the following:
curl \"https://raw.githubusercontent.com/kuadrant/kuadrant-operator/${KUADRANT_REF}/hack/quickstart-setup.sh\" | bash\n
This will setup a single kind cluster, install Istio and install Kuadrant. Once this completes you should be able to move on to using the various policy apis offered by Kuadrant."},{"location":"getting-started-single-cluster/#clean-up","title":"Clean Up","text":"To ensure that any DNS records are removed, you should remove any DNSPolicy
and TLSPolicy
resources before deleting the local cluster.
"},{"location":"getting-started-single-cluster/#whats-next","title":"What's Next","text":"The next step is to setup and use the policies provided by Kuadrant.
Secure, Protect and Connect your Gateway
"},{"location":"kuadrant-operator/","title":"Kuadrant Operator","text":"The Operator to install and manage the lifecycle of the Kuadrant components deployments.
"},{"location":"kuadrant-operator/#overview","title":"Overview","text":"Kuadrant is a re-architecture of API Management using Cloud Native concepts and separating the components to be less coupled, more reusable and leverage the underlying kubernetes platform. It aims to deliver a smooth experience to providers and consumers of applications & services when it comes to rate limiting, authentication, authorization, discoverability, change management, usage contracts, insights, etc.
Kuadrant aims to produce a set of loosely coupled functionalities built directly on top of Kubernetes. Furthermore, it only strives to provide what Kubernetes doesn\u2019t offer out of the box, i.e. Kuadrant won\u2019t be designing a new Gateway/proxy, instead it will opt to connect with what\u2019s there and what\u2019s being developed (think Envoy, Istio, GatewayAPI).
Kuadrant is a system of cloud-native k8s components that grows as users\u2019 needs grow.
- From simple protection of a Service (via AuthN) that is used by teammates working on the same cluster, or \u201csibling\u201d services, up to AuthZ of users using OIDC plus custom policies.
- From no rate-limiting to rate-limiting for global service protection on to rate-limiting by users/plans
"},{"location":"kuadrant-operator/#architecture","title":"Architecture","text":"Kuadrant relies on Istio and the Gateway API to operate the cluster (Istio's) ingress gateway to provide API management with authentication (authN), authorization (authZ) and rate limiting capabilities.
"},{"location":"kuadrant-operator/#kuadrant-components","title":"Kuadrant components","text":"CRD Description Control Plane The control plane takes the customer desired configuration (declaratively as kubernetes custom resources) as input and ensures all components are configured to obey customer's desired behavior. This repository contains the source code of the kuadrant control plane Kuadrant Operator A Kubernetes Operator to manage the lifecycle of the kuadrant deployment Authorino The AuthN/AuthZ enforcer. As the external istio authorizer (envoy external authorization serving gRPC service) Limitador The external rate limiting service. It exposes a gRPC service implementing the Envoy Rate Limit protocol (v3) Authorino Operator A Kubernetes Operator to manage Authorino instances Limitador Operator A Kubernetes Operator to manage Limitador instances DNS Operator A Kubernetes Operator to manage DNS records in external providers"},{"location":"kuadrant-operator/#provided-apis","title":"Provided APIs","text":"The kuadrant control plane owns the following Custom Resource Definitions, CRDs:
CRD Description Example AuthPolicy CRD [doc] [reference] Enable AuthN and AuthZ based access control on workloads AuthPolicy CR RateLimitPolicy CRD [doc] [reference] Enable access control on workloads based on HTTP rate limiting RateLimitPolicy CR DNSPolicy CRD [doc] [reference] Enable DNS management DNSPolicy CR TLSPolicy CRD [doc] [reference] Enable TLS management TLSPolicy CR Additionally, Kuadrant provides the following CRDs
CRD Owner Description Example Kuadrant CRD Kuadrant Operator Represents an instance of kuadrant Kuadrant CR Limitador CRD Limitador Operator Represents an instance of Limitador Limitador CR Authorino CRD Authorino Operator Represents an instance of Authorino Authorino CR "},{"location":"kuadrant-operator/#getting-started","title":"Getting started","text":""},{"location":"kuadrant-operator/#pre-requisites","title":"Pre-requisites","text":" - Istio is installed in the cluster. Otherwise, refer to the Istio getting started guide.
- Kubernetes Gateway API is installed in the cluster. Otherwise, configure Istio to expose a service using the Kubernetes Gateway API.
- cert-manager is installed in the cluster. Otherwise, refer to the cert-manager installation guide.
"},{"location":"kuadrant-operator/#installing-kuadrant","title":"Installing Kuadrant","text":"Installing Kuadrant is a two-step procedure. Firstly, install the Kuadrant Operator and secondly, request a Kuadrant instance by creating a Kuadrant custom resource.
"},{"location":"kuadrant-operator/#1-install-the-kuadrant-operator","title":"1. Install the Kuadrant Operator","text":"The Kuadrant Operator is available in public community operator catalogs, such as the Kubernetes OperatorHub.io and the Openshift Container Platform and OKD OperatorHub.
Kubernetes
The operator is available from OperatorHub.io. Just go to the linked page and follow installation steps (or just run these two commands):
# Install Operator Lifecycle Manager (OLM), a tool to help manage the operators running on your cluster.\n\ncurl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.23.1/install.sh | bash -s v0.23.1\n\n# Install the operator by running the following command:\n\nkubectl create -f https://operatorhub.io/install/kuadrant-operator.yaml\n
Openshift
The operator is available from the Openshift Console OperatorHub. Just follow installation steps choosing the \"Kuadrant Operator\" from the catalog:
"},{"location":"kuadrant-operator/#2-request-a-kuadrant-instance","title":"2. Request a Kuadrant instance","text":"Create the namespace:
kubectl create namespace kuadrant\n
Apply the Kuadrant
custom resource:
kubectl -n kuadrant apply -f - <<EOF\n---\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant-sample\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/#protect-your-service","title":"Protect your service","text":""},{"location":"kuadrant-operator/#if-you-are-an-api-provider","title":"If you are an API Provider","text":" - Deploy the service/API to be protected (\"Upstream\")
- Expose the service/API using the kubernetes Gateway API, ie HTTPRoute object.
- Write and apply the Kuadrant's RateLimitPolicy and/or AuthPolicy custom resources targeting the HTTPRoute resource to have your API protected.
"},{"location":"kuadrant-operator/#if-you-are-a-cluster-operator","title":"If you are a Cluster Operator","text":" - (Optionally) deploy istio ingress gateway using the Gateway resource.
- Write and apply the Kuadrant's RateLimitPolicy and/or AuthPolicy custom resources targeting the Gateway resource to have your gateway traffic protected.
"},{"location":"kuadrant-operator/#user-guides","title":"User guides","text":"The user guides section of the docs gathers several use-cases as well as the instructions to implement them using kuadrant.
- Simple Rate Limiting for Application Developers
- Authenticated Rate Limiting for Application Developers
- Gateway Rate Limiting for Cluster Operators
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/#kuadrant-rate-limiting","title":"Kuadrant Rate Limiting","text":""},{"location":"kuadrant-operator/#documentation","title":"Documentation","text":"Docs can be found on the Kuadrant website.
"},{"location":"kuadrant-operator/#contributing","title":"Contributing","text":"The Development guide describes how to build the kuadrant operator and how to test your changes before submitting a patch or opening a PR.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"kuadrant-operator/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"kuadrant-operator/doc/auth/","title":"Kuadrant Auth","text":"A Kuadrant AuthPolicy custom resource:
- Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to enforce auth.
- Supports targeting subsets (sections) of a network resource to apply the auth rules to.
- Abstracts the details of the underlying external authorization protocol and configuration resources, that have a much broader remit and surface area.
- Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/auth/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/auth/#envoys-external-authorization-protocol","title":"Envoy's External Authorization Protocol","text":"Kuadrant's Auth implementation relies on the Envoy's External Authorization protocol. The workflow per request goes:
- On incoming request, the gateway checks the matching rules for enforcing the auth rules, as stated in the AuthPolicy custom resources and targeted Gateway API networking objects
- If the request matches, the gateway sends one CheckRequest to the external auth service (\"Authorino\").
- The external auth service responds with a CheckResponse back to the gateway with either an
OK
or DENIED
response code.
An AuthPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external auth service.
"},{"location":"kuadrant-operator/doc/auth/#the-authpolicy-custom-resource","title":"The AuthPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/auth/#overview","title":"Overview","text":"The AuthPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - Authentication/authorization scheme (
spec.rules
) - Top-level route selectors (
spec.routeSelectors
) - Top-level additional conditions (
spec.when
) - List of named patterns (
spec.patterns
)
The auth scheme specify rules for:
- Authentication (
spec.rules.authentication
) - External auth metadata fetching (
spec.rules.metadata
) - Authorization (
spec.rules.authorization
) - Custom response items (
spec.rules.response
) - Callbacks (
spec.rules.callbacks
)
Each auth rule can declare specific routeSelectors
and when
conditions for the rule to apply.
The auth scheme (rules
), as well as conditions and named patterns can be declared at the top-level level of the spec (with the semantics of defaults) or alternatively within explicit defaults
or overrides
blocks.
"},{"location":"kuadrant-operator/doc/auth/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-auth-policy\nspec:\n # Reference to an existing networking resource to attach the policy to. REQUIRED.\n # It can be a Gateway API HTTPRoute or Gateway resource.\n # It can only refer to objects in the same namespace as the AuthPolicy.\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n\n # Selectors of HTTPRouteRules within the targeted HTTPRoute that activate the AuthPolicy.\n # Each element contains a HTTPRouteMatch object that will be used to select HTTPRouteRules that include at least\n # one identical HTTPRouteMatch.\n # The HTTPRouteMatch part does not have to be fully identical, but the what's stated in the selector must be\n # identically stated in the HTTPRouteRule.\n # Do not use it on AuthPolicies that target a Gateway.\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n\n # Additional dynamic conditions to trigger the AuthPolicy.\n # Use it for filtering attributes not supported by HTTPRouteRule or with AuthPolicies that target a Gateway.\n # Check out https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md to learn more\n # about the Well-known Attributes that can be used in this field.\n # Equivalent to if otherwise declared within `defaults`.\n when: [\u2026]\n\n # Sets of common patterns of selector-operator-value triples, to be referred by name in `when` conditions\n # and pattern-matching rules. Often employed to avoid repetition in the policy.\n # Equivalent to if otherwise declared within `defaults`.\n patterns: {\u2026}\n\n # The auth rules to apply to the network traffic routed through the targeted resource.\n # Equivalent to if otherwise declared within `defaults`.\n rules:\n # Authentication rules to enforce.\n # At least one config must evaluate to a valid identity object for the auth request to be successful.\n # If omitted or empty, anonymous access is assumed.\n authentication:\n \"my-authn-rule\":\n # The authentication method of this rule.\n # One-of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous.\n apiKey: {\u2026}\n\n # Where credentials are required to be passed in the request for authentication based on this rule.\n # One-of: authorizationHeader, customHeader, queryString, cookie.\n credentials:\n authorizationHeader:\n prefix: APIKEY\n\n # Rule-level route selectors.\n routeSelectors: [\u2026]\n\n # Rule-level additional conditions.\n when: [\u2026]\n\n # Configs for caching the resolved object returned out of evaluating this auth rule.\n cache: {\u2026}\n\n # Rules for fetching auth metadata from external sources.\n metadata:\n \"my-external-source\":\n # The method for fetching metadata from the external source.\n # One-of: http: userInfo, uma.\n http: {\u2026}\n\n # Authorization rules to enforce.\n # All policies must allow access for the auth request be successful.\n authorization:\n \"my-authz-rule\":\n # The authorization method of this rule.\n # One-of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb.\n opa: {\u2026}\n\n # Customizations to the authorization response.\n response:\n # Custom denial status and other HTTP attributes for unauthenticated requests.\n unauthenticated: {\u2026}\n\n # Custom denial status and other HTTP attributes for unauhtorized requests.\n 
unauthorized: {\u2026}\n\n # Custom response items when access is granted.\n success:\n # Custom response items wrapped as HTTP headers to be injected in the request\n headers:\n \"my-custom-header\":\n # One-of: plain, json, wristband.\n plain: {\u2026}\n\n # Custom response items wrapped as envoy dynamic metadata.\n dynamicMetadata:\n # One-of: plain, json, wristband.\n \"my-custom-dyn-metadata\":\n json: {\u2026}\n\n # Rules for post-authorization callback requests to external services.\n # Triggered regardless of the result of the authorization request.\n callbacks:\n \"my-webhook\":\n http: {\u2026}\n\n # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n # routes that lack a more specific policy attached to.\n # Mutually exclusive with `overrides` and with declaring the `rules`, `when` and `patterns` at the top-level of\n # the spec.\n defaults:\n rules:\n authentication: {\u2026}\n metadata: {\u2026}\n authorization: {\u2026}\n response: {\u2026}\n callbacks: {\u2026}\n when: [\u2026]\n patterns: {\u2026}\n\n # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n # thus also overriding any more specific policy occasionally attached to any of those routes.\n # Mutually exclusive with `defaults` and with declaring `rules`, `when` and `patterns` at the top-level of\n # the spec.\n overrides:\n rules:\n authentication: {\u2026}\n metadata: {\u2026}\n authorization: {\u2026}\n response: {\u2026}\n callbacks: {\u2026}\n when: [\u2026]\n patterns: {\u2026}\n
Check out the API reference for a full specification of the AuthPolicy CRD.
"},{"location":"kuadrant-operator/doc/auth/#using-the-authpolicy","title":"Using the AuthPolicy","text":""},{"location":"kuadrant-operator/doc/auth/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"When an AuthPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs
field of the HTTPRoute.
The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets, by specifying the routeSelectors
field of the AuthPolicy spec.
Target a HTTPRoute by setting the spec.targetRef
field of the AuthPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-route-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: <HTTPRoute Name>\n rules: {\u2026}\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502 \u2502 (App namespace) \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 parentRefs \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u25b2 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 targetRef \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 \u2502 \u2502 AuthPolicy \u2502 \u2502\n\u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/auth/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"If an AuthPolicy targets a route defined for *.com
and another AuthPolicy targets another route for api.com
, the Kuadrant control plane will not merge these two AuthPolicies. Rather, it will mimic the behavior of gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and auth rules.
E.g., a request coming for api.com
will be protected according to the rules from the AuthPolicy that targets the route for api.com
; while a request for other.com
will be protected with the rules from the AuthPolicy targeting the route for *.com
.
Example with 3 AuthPolicies and 3 HTTPRoutes:
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy A will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy B will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy W will be enforced
"},{"location":"kuadrant-operator/doc/auth/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"An AuthPolicy that targets a Gateway can declare a block of defaults (spec.defaults
) or a block of overrides (spec.overrides
). As a standard, gateway policies that do not specify neither defaults nor overrides, act as defaults.
When declaring defaults, an AuthPolicy which targets a Gateway will be enforced to all HTTP traffic hitting the gateway, unless a more specific AuthPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referrencing the gateway as parent will be automatically covered by the default AuthPolicy, as well as changes in the existing HTTPRoutes.
Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempt, such as by setting preemptive \"deny-all\" policies on hostnames and hostname wildcards.
Inversely, a gateway policy that specify overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.
Target a Gateway HTTPRoute by setting the spec.targetRef
field of the AuthPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-gw-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n defaults: # alternatively: `overrides`\n rules: {\u2026}\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502 \u2502 (App namespace) \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 parentRefs \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u25b2 \u2502 \u2502 \u25b2 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 targetRef \u2502 \u2502 \u2502 targetRef \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 AuthPolicy \u2502 \u2502 \u2502 \u2502 AuthPolicy \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/auth/#overlapping-gateway-and-httproute-authpolicies","title":"Overlapping Gateway and HTTPRoute AuthPolicies","text":"Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.
Gateway AuthPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute AuthPolicy exists, in which case the HTTPRoute AuthPolicy prevails.
Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoutes and 2 Gateways without AuthPolicies attached):
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy G (defaults) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy A will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy B will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy W will be enforced - Request to
other.com
(suppose a route exists) \u2192 AuthPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
Gateway AuthPolicies that declare overrides protect all traffic routed through the gateway, regardless of the existence of any more specific HTTPRoute AuthPolicy.
Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoutes and 2 Gateways without AuthPolicies attached):
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy G (overrides) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
other.com
(suppose a route exists) \u2192 AuthPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
"},{"location":"kuadrant-operator/doc/auth/#route-selectors","title":"Route selectors","text":"Route selectors allow targeting sections of a HTTPRoute, by specifying sets of HTTPRouteMatches and/or hostnames that make the policy controller look up within the HTTPRoute spec for compatible declarations, and select the corresponding HTTPRouteRules and hostnames, to then build conditions that activate the policy or policy rule.
Check out Route selectors for a full description, semantics and API reference.
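For illustration, a sketch of an AuthPolicy rule scoped to one section of an HTTPRoute via a route selector; the API key authentication rule, the path and the hostname below are assumptions for the example:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-route-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: <HTTPRoute Name>\n rules:\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels:\n app: toystore # labels of the Secrets holding the API keys (assumed)\n routeSelectors:\n - matches:\n - path:\n type: PathPrefix\n value: /admin # only HTTPRouteRules with an identical match are selected\n hostnames:\n - admin.toystore.com\n
Rules without routeSelectors apply to the whole targeted HTTPRoute.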
"},{"location":"kuadrant-operator/doc/auth/#when-conditions","title":"when
conditions","text":"when
conditions can be used to scope an AuthPolicy or auth rule within an AuthPolicy (i.e. to filter the traffic to which a policy or policy rule applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules via routeSelectors
.
Use when
conditions to conditionally activate policies and policy rules based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames
and spec.rules.matches
fields, or in general in AuthPolicies that target a Gateway.
when
conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.
The selectors within the when
conditions of an AuthPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.
Authorino JSON path string modifiers can also be applied to the selectors within the when
conditions of an AuthPolicy.
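For example, a sketch of a when block that activates a policy rule only for GET or HEAD requests under an assumed /admin path; the selectors come from the Well-known Attributes RFC, while the grouping and the values are illustrative:
when:\n - any: # OR grouping of the two method conditions\n - selector: request.method\n operator: eq\n value: GET\n - selector: request.method\n operator: eq\n value: HEAD\n - selector: request.path # implicitly ANDed with the group above\n operator: matches\n value: ^/admin/.*\n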
"},{"location":"kuadrant-operator/doc/auth/#examples","title":"Examples","text":"Check out the following user guides for examples of protecting services with Kuadrant:
- Enforcing authentication & authorization with Kuadrant AuthPolicy, for app developers and platform engineers
- Authenticated Rate Limiting for Application Developers
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/auth/#known-limitations","title":"Known limitations","text":" - One HTTPRoute can only be targeted by one AuthPolicy.
- One Gateway can only be targeted by one AuthPolicy.
- AuthPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the AuthPolicy.
- 2+ AuthPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/auth/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"Kuadrant currently does not support multiple AuthPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames
field; HTTPRoutes that specify a hostname identical to a hostname specified in a listener of one of the route's parent gateways; and HTTPRoutes that don't specify any hostname at all, thus inheriting the hostnames from the parent gateways. In any of these cases, at most one AuthPolicy targeting any of those resources that specify identical hostnames is allowed.
Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.
This limitation is rooted in the underlying components configured by Kuadrant to implement its policies and in the lack of information in the data plane regarding the exact route honored by the API gateway for each specific request, in cases of conflicting hostnames.
To exemplify one way this limitation can impact deployments, consider the following topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2 \u25b2\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 AuthPolicy \u2502 \u2502 AuthPolicy \u2502\n \u2502 (policy-1) \u2502 \u2502 (policy-2) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
In the example above, with the policy-1
resource created before policy-2
, policy-1
will be enforced on all requests to app.io/foo
while policy-2
will be rejected. I.e. app.io/bar
will not be secured. In fact, the status conditions of policy-2
shall reflect Enforced=false
with message \"AuthPolicy has encountered some issues: AuthScheme is not ready yet\".
Notice the enforcement of policy-1
and no enforcement of policy-2
is the opposite of the behavior observed in the analogous problem with the Kuadrant RateLimitPolicy.
A slightly different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with the expectation of setting default rules for the cases not covered by more specific policies. E.g.:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 AuthPolicy \u2502\n \u2502 \u2502 (policy-2) \u2502\n \u25bc \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 AuthPolicy \u2502\n \u2502 (policy-1) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Once again, requests to app.io/foo
will be protected under AuthPolicy policy-1
, while requests to app.io/bar
will not be protected under any policy at all, contrary to the expectation that gateway policy policy-2
would be enforced as the default. Both policies will nonetheless report the status condition as Enforced
.
To avoid these problems, use different hostnames in each route.
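For instance, a sketch of the two routes above adjusted to use distinct hostnames (names illustrative), so that each can carry its own policy:
apiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: route-a\nspec:\n parentRefs:\n - name: <Gateway Name>\n hostnames:\n - foo.app.io # distinct hostname per route\n rules:\n - matches:\n - path:\n value: /foo\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: route-b\nspec:\n parentRefs:\n - name: <Gateway Name>\n hostnames:\n - bar.app.io\n rules:\n - matches:\n - path:\n value: /bar\n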
"},{"location":"kuadrant-operator/doc/auth/#implementation-details","title":"Implementation details","text":"Under the hood, for each AuthPolicy, Kuadrant creates an Istio AuthorizationPolicy
and an Authorino AuthConfig
custom resource.
Only requests that match the rules in the Istio AuthorizationPolicy
cause an authorization request to be sent to the external authorization service (\"Authorino\"), i.e., only requests directed to the HTTPRouteRules targeted by the AuthPolicy (directly or indirectly), according to the declared top-level route selectors (if present), or all requests for which a matching HTTPRouteRule exists (otherwise).
Authorino looks up the auth scheme (AuthConfig
custom resource) to enforce using the provided hostname of the original request as key. It then checks again if the request matches at least one of the selected HTTPRouteRules, in which case it enforces the auth scheme.
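As a rough sketch of the shape of the Istio AuthorizationPolicy generated per the above, assuming an external authorization provider registered as \"kuadrant-authorization\" and an illustrative host and path:
apiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n name: <generated name>\n namespace: <Gateway Namespace>\nspec:\n action: CUSTOM\n provider:\n name: kuadrant-authorization # ext_authz provider name (assumed)\n rules:\n - to:\n - operation:\n hosts:\n - api.toystore.com # hosts/paths derived from the targeted HTTPRouteRules\n paths:\n - /toys*\n
Only requests matching these rules trigger the check request with the external authorization service.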
Exception to the rule Due to limitations imposed by the Istio `AuthorizationPolicy`, there are a few patterns of HTTPRouteRules that cannot be translated to filters for the external authorization request. Therefore, the following patterns used in HTTPRouteMatches of top-level route selectors of an AuthPolicy will not be included in the Istio AuthorizationPolicy rules that trigger the check request with Authorino: `PathMatchRegularExpression`, `HeaderMatchRegularExpression`, and `HTTPQueryParamMatch`. As a consequence, requests that do not match these patterns, and that otherwise would not be checked with Authorino, will still result in a request to the external authorization service. Authorino will nonetheless verify those patterns and ensure the auth scheme is enforced only when the request matches a selected HTTPRouteRule. Users of Kuadrant may observe an unnecessary call to the authorization service in those cases where the request is out of the scope of the AuthPolicy and therefore always authorized."},{"location":"kuadrant-operator/doc/auth/#internal-custom-resources-and-namespaces","title":"Internal custom resources and namespaces","text":"While the Istio AuthorizationPolicy
needs to be created in the same namespace as the gateway workload, the Authorino AuthConfig
is created in the namespace of the AuthPolicy
itself. This simplifies references, such as to Kubernetes Secrets referred to in the AuthPolicy, as well as the RBAC needed to support the architecture.
"},{"location":"kuadrant-operator/doc/development/","title":"Development Guide","text":""},{"location":"kuadrant-operator/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - operator-sdk version v1.28.1
- kind version v0.22.0
- git
- go version 1.21+
- kubernetes version v1.19+
- kubectl version v1.19+
"},{"location":"kuadrant-operator/doc/development/#build","title":"Build","text":"make\n
"},{"location":"kuadrant-operator/doc/development/#run-locally","title":"Run locally","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Then, run the operator locally
make run\n
"},{"location":"kuadrant-operator/doc/development/#deploy-the-operator-in-a-deployment-object","title":"Deploy the operator in a deployment object","text":"make local-setup\n
List of tasks done by the command above:
- Create local cluster using kind
- Build kuadrant docker image from the current working directory
- Deploy Kuadrant control plane (including istio, authorino and limitador)
TODO: customize with custom authorino and limitador git refs. Make sure Makefile propagates variable to deploy
target
"},{"location":"kuadrant-operator/doc/development/#deploy-kuadrant-operator-using-olm","title":"Deploy kuadrant operator using OLM","text":"You can deploy kuadrant using OLM just running few commands. No need to build any image. Kuadrant engineering team provides latest
and release version tagged images. They are available in the Quay.io/Kuadrant image repository.
Create kind cluster
make kind-create-cluster\n
Deploy OLM system
make install-olm\n
Deploy kuadrant using OLM. The make deploy-catalog
target accepts the following variables:
Makefile variables:
- CATALOG_IMG: Kuadrant operator catalog image URL. Default: quay.io/kuadrant/kuadrant-operator-catalog:latest
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
"},{"location":"kuadrant-operator/doc/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"If you want to deploy (using OLM) a custom kuadrant operator, you need to build your own catalog. Furthermore, if you want to deploy a custom limitador or authorino operator, you also need to build your own catalog. The kuadrant operator bundle includes the authorino or limtador operator dependency version, hence using other than latest
version requires a custom kuadrant operator bundle and a custom catalog including the custom bundle.
"},{"location":"kuadrant-operator/doc/development/#build-kuadrant-operator-bundle-image","title":"Build kuadrant operator bundle image","text":"The make bundle
target accepts the following variables:
Makefile variables:
- IMG: Kuadrant operator image URL. Default: quay.io/kuadrant/kuadrant-operator:latest. The TAG var can be used to build this URL; defaults to latest if not provided.
- VERSION: Bundle version. Default: 0.0.0
- LIMITADOR_OPERATOR_BUNDLE_IMG: Limitador operator bundle URL. Default: quay.io/kuadrant/limitador-operator-bundle:latest. The LIMITADOR_OPERATOR_VERSION var can be used to build this; defaults to latest if not provided.
- AUTHORINO_OPERATOR_BUNDLE_IMG: Authorino operator bundle URL. Default: quay.io/kuadrant/authorino-operator-bundle:latest. The AUTHORINO_OPERATOR_VERSION var can be used to build this; defaults to latest if not provided.
- DNS_OPERATOR_BUNDLE_IMG: DNS operator bundle URL. Default: quay.io/kuadrant/dns-operator-bundle:latest. The DNS_OPERATOR_VERSION var can be used to build this; defaults to latest if not provided.
- RELATED_IMAGE_WASMSHIM: WASM shim image URL. Default: oci://quay.io/kuadrant/wasm-shim:latest. The WASM_SHIM_VERSION var can be used to build this; defaults to latest if not provided.
- Build the bundle manifests
make bundle [IMG=quay.io/kuadrant/kuadrant-operator:latest] \\\n [VERSION=0.0.0] \\\n [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \\\n [RELATED_IMAGE_WASMSHIM=oci://quay.io/kuadrant/wasm-shim:latest]\n
- Build the bundle image from the manifests
Makefile variables:
- BUNDLE_IMG: Kuadrant operator bundle image URL. Default: quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
- Push the bundle image to a registry
Makefile variables:
- BUNDLE_IMG: Kuadrant operator bundle image URL. Default: quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
Frequently, you may need to build a custom kuadrant bundle with the default (latest
) Limitador and Authorino bundles. These are example commands to build the manifests, build the bundle image, and push it to the registry.
In the example, a new kuadrant operator bundle version 0.8.0
will be created that references the kuadrant operator image quay.io/kuadrant/kuadrant-operator:v0.5.0
and the latest Limitador and Authorino bundles.
# manifests\nmake bundle IMG=quay.io/kuadrant/kuadrant-operator:v0.5.0 VERSION=0.8.0\n\n# bundle image\nmake bundle-build BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n\n# push bundle image\nmake bundle-push BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n
"},{"location":"kuadrant-operator/doc/development/#build-custom-catalog","title":"Build custom catalog","text":"The catalog's format will be File-based Catalog.
Make sure all the required bundles are pushed to the registry; this is required by the opm
tool.
The make catalog
target accepts the following variables:
Makefile variables:
- BUNDLE_IMG: Kuadrant operator bundle image URL. Default: quay.io/kuadrant/kuadrant-operator-bundle:latest
- LIMITADOR_OPERATOR_BUNDLE_IMG: Limitador operator bundle URL. Default: quay.io/kuadrant/limitador-operator-bundle:latest
- AUTHORINO_OPERATOR_BUNDLE_IMG: Authorino operator bundle URL. Default: quay.io/kuadrant/authorino-operator-bundle:latest
- DNS_OPERATOR_BUNDLE_IMG: DNS operator bundle URL. Default: quay.io/kuadrant/dns-operator-bundle:latest
make catalog [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest] \\\n [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest]\n
- Build the catalog image from the manifests
Makefile variables:
- CATALOG_IMG: Kuadrant operator catalog image URL. Default: quay.io/kuadrant/kuadrant-operator-catalog:latest
make catalog-build [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
- Push the catalog image to a registry
make catalog-push [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
You can try out your custom catalog image following the steps of the Deploy kuadrant operator using OLM section.
"},{"location":"kuadrant-operator/doc/development/#cleaning-up","title":"Cleaning up","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/development/#run-tests","title":"Run tests","text":""},{"location":"kuadrant-operator/doc/development/#unittests","title":"Unittests","text":"make test-unit\n
Optionally, add TEST_NAME
makefile variable to run a specific test
make test-unit TEST_NAME=TestLimitIndexEquals\n
or even a subtest
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
"},{"location":"kuadrant-operator/doc/development/#integration-tests","title":"Integration tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Run integration tests
make test-integration\n
"},{"location":"kuadrant-operator/doc/development/#all-tests","title":"All tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Run all tests
make test\n
"},{"location":"kuadrant-operator/doc/development/#lint-tests","title":"Lint tests","text":"make run-lint\n
"},{"location":"kuadrant-operator/doc/development/#uninstall-kuadrant-crds","title":"(Un)Install Kuadrant CRDs","text":"You need an active session open to a kubernetes cluster.
Remove CRDs
make uninstall\n
"},{"location":"kuadrant-operator/doc/dns/","title":"Kuadrant DNS","text":"A Kuadrant DNSPolicy custom resource:
- Targets Gateway API networking resources Gateways to provide dns management by managing the lifecycle of dns records in external dns providers such as AWS Route53 and Google DNS.
"},{"location":"kuadrant-operator/doc/dns/#how-it-works","title":"How it works","text":"A DNSPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external DNS service. The needed dns names are gathered from the listener definitions and the IPAdresses | CNAME hosts are gathered from the status block of the gateway resource.
"},{"location":"kuadrant-operator/doc/dns/#the-dnspolicy-custom-resource","title":"The DNSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/dns/#overview","title":"Overview","text":"The DNSPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - DNS Routing Strategy (
spec.routingStrategy
) - LoadBalancing specification (
spec.loadBalancing
) - HealthCheck specification (
spec.healthCheck
)
"},{"location":"kuadrant-operator/doc/dns/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: my-dns-policy\nspec:\n # reference to an existing networking resource to attach the policy to\n # it can only be a Gateway API Gateway resource\n # it can only refer to objects in the same namespace as the DNSPolicy\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: mygateway\n\n # (optional) routing strategy to use when creating DNS records, defaults to `loadbalanced`\n # determines what DNS records are created in the DNS provider\n # check out Kuadrant RFC 0005 https://github.com/Kuadrant/architecture/blob/main/rfcs/0005-single-cluster-dnspolicy.md to learn more about the Routing Strategy field\n # One-of: simple, loadbalanced.\n routingStrategy: loadbalanced\n\n # (optional) loadbalancing specification\n # use it for providing the specification of how dns will be configured in order to provide balancing of load across multiple clusters when using the `loadbalanced` routing strategy\n # Primary use of this is for multi cluster deployments\n # check out Kuadrant RFC 0003 https://github.com/Kuadrant/architecture/blob/main/rfcs/0003-dns-policy.md to learn more about the options that can be used in this field\n loadBalancing:\n # (optional) weighted specification\n # use it to control the weight value applied to records\n weighted:\n # use it to change the weight of a record based on labels applied to the target meta resource i.e. Gateway in a single cluster context or ManagedCluster in multi cluster with OCM\n custom:\n\n - weight: 200\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n # (optional) weight value that will be applied to weighted dns records by default. Integer greater than 0 and no larger than the maximum value accepted by the target dns provider, defaults to `120` \n defaultWeight: 100\n # (optional) geo specification\n # use it to control the geo value applied to records \n geo:\n # (optional) default geo to be applied to records \n defaultGeo: IE\n\n # (optional) health check specification\n # health check probes with the following specification will be created for each DNS target\n healthCheck:\n allowInsecureCertificates: true\n endpoint: /\n expectedResponses:\n\n - 200\n - 201\n - 301\n failureThreshold: 5\n port: 443\n protocol: https\n
Check out the API reference for a full specification of the DNSPolicy CRD.
"},{"location":"kuadrant-operator/doc/dns/#using-the-dnspolicy","title":"Using the DNSPolicy","text":""},{"location":"kuadrant-operator/doc/dns/#dns-provider-and-managedzone-setup","title":"DNS Provider and ManagedZone Setup","text":"A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about dns providers, and what domains these dns providers are currently hosting. This is done through the creation of ManagedZones and dns provider secrets containing the credentials for the dns provider account.
If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net
:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: my-gw\nspec:\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: echo.apps.hcpapps.net\n port: 80\n protocol: HTTP\n
In order for the DNSPolicy to act upon that listener, a ManagedZone must exist for that hostnames' domain.
apiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: apps.hcpapps.net\nspec:\n domainName: apps.hcpapps.net\n description: \"apps.hcpapps.net managed domain\"\n dnsProviderSecretRef:\n name: my-aws-credentials\n
The managed zone references a secret containing the external DNS provider services credentials.
apiVersion: v1\nkind: Secret\nmetadata:\n name: my-aws-credentials\n namespace: <ManagedZone Namespace>\ndata:\n AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n AWS_REGION: <AWS_REGION>\n AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\ntype: kuadrant.io/aws\n
"},{"location":"kuadrant-operator/doc/dns/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"When a DNSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a matching ManagedZone.
Target a Gateway by setting the spec.targetRef
field of the DNSPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: DNSPolicy\nmetadata:\n name: <DNSPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/dns/#dnsrecord-resource","title":"DNSRecord Resource","text":"The DNSPolicy will create a DNSRecord resource for each listener hostname with a suitable ManagedZone configured. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.
Given the following multi cluster gateway status:
status:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-1/172.31.201.1\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-2/172.31.202.1\n listeners:\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-1.api\n supportedKinds: []\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-2.api\n supportedKinds: [] \n
A DNSPolicy targeting this gateway would create an appropriate DNSRecord based on the routing strategy selected.
"},{"location":"kuadrant-operator/doc/dns/#loadbalanced","title":"loadbalanced","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: echo.apps.hcpapps.net\n namespace: <Gateway Namespace>\nspec:\n endpoints:\n\n - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n managedZone:\n name: apps.hcpapps.net \n
After DNSRecord reconciliation the listener hostname should be resolvable through dns:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/dns/#simple","title":"simple","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: echo.apps.hcpapps.net\n namespace: <Gateway Namespace>\nspec:\n endpoints:\n\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n - 172.31.202.1\n managedZone:\n name: apps.hcpapps.net \n
After DNSRecord reconciliation the listener hostname should be resolvable through dns:
dig echo.apps.hcpapps.net +short\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/dns/#examples","title":"Examples","text":"Check out the following user guides for examples of using the Kuadrant DNSPolicy:
"},{"location":"kuadrant-operator/doc/dns/#known-limitations","title":"Known limitations","text":" - One Gateway can only be targeted by one DNSPolicy.
- DNSPolicies can only target Gateways defined within the same namespace of the DNSPolicy.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/","title":"DNS Health Checks","text":"DNS Health Checks are a tool provided by some DNS Providers for ensuring the availability and reliability of your DNS Records and only publishing DNS Records that resolve to healthy workloads. Kuadrant offers a powerful feature known as DNSPolicy, which allows you to configure these health checks for all the managed DNS endpoints created as a result of that policy. This guide provides a comprehensive overview of how to set up, utilize, and understand these DNS health checks.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#supported-providers","title":"Supported Providers","text":"we currently only support AWS Route53 DNS Health checks.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#configuration-of-health-checks","title":"Configuration of Health Checks","text":"To configure a DNS health check, you need to specify the healthCheck
section of the DNSPolicy, which includes important properties such as:
endpoint
: This is the path where the health checks take place, usually represented as '/healthz' or something similar. port
: Specific port for the connection to be checked. protocol
: Type of protocol being used, like HTTP or HTTPS. failureThreshold
: How many times we can tolerate a failure on this endpoint, before removing the related DNS entry.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing: simple\n healthCheck:\n endpoint: \"/health\"\n port: 443\n protocol: \"HTTPS\"\n failureThreshold: 5\n
This configuration sets up a DNS health check in AWS Route53 which will connect by HTTPS on port 443 and request the path /health.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#reviewing-the-status-of-health-checks","title":"Reviewing the status of Health Checks","text":"The DNS Record CR will show whether the health check has been created or not in the DNS Provider, and will also show any errors encountered when trying to create or update the health check configuration.
To see the status of the executing health check requires logging in to the Route53 console to view the current probe results.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#reconfiguring-health-checks","title":"Reconfiguring Health Checks","text":"To reconfigure the health checks, update the HealthCheck section of the DNS Policy, this will be reflected into all the health checks created as a result of this policy.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#removing-health-checks","title":"Removing Health Checks","text":"To remove the health checks created in AWS, delete the healthcheck section of the DNS Policy. All health checks will be deleted automatically, if the DNS Policy is deleted.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#limitations","title":"Limitations","text":"As Route53 will only perform health checks on an IP address, currently do not create health checks on DNS Policies that target gateways with hostname addresses.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#other-providers","title":"Other Providers","text":"Although we intend to support integrating with the DNS Health checks provided by other DNS Providers in the future, we currently only support AWS Route53.
"},{"location":"kuadrant-operator/doc/logging/","title":"Logging","text":"The kuadrant operator outputs 3 levels of log messages: (from lowest to highest level)
debug
info
(default) error
info
logging is restricted to high-level information. Actions like creating, deleteing or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.
Only debug
logging will include processing details.
To configure the desired log level, set the environment variable LOG_LEVEL
to one of the supported values listed above. Default log level is info
.
Apart from log level, the operator can output messages to the logs in 2 different formats:
production
(default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
development
: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}
To configure the desired log mode, set the environment variable LOG_MODE
to one of the supported values listed above. Default log level is production
.
"},{"location":"kuadrant-operator/doc/rate-limiting/","title":"Kuadrant Rate Limiting","text":"A Kuadrant RateLimitPolicy custom resource, often abbreviated \"RateLimitPolicy\":
- Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to rate limit.
- Supports targeting subsets (sections) of a network resource to apply the limits to.
- Abstracts the details of the underlying Rate Limit protocol and configuration resources, that have a much broader remit and surface area.
- Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/rate-limiting/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"Kuadrant's Rate Limit implementation relies on the Envoy's Rate Limit Service (RLS) protocol. The workflow per request goes:
- On incoming request, the gateway checks the matching rules for enforcing rate limits, as stated in the RateLimitPolicy custom resources and targeted Gateway API networking objects
- If the request matches, the gateway sends one RateLimitRequest to the external rate limiting service (\"Limitador\").
- The external rate limiting service responds with a RateLimitResponse back to the gateway with either an
OK
or OVER_LIMIT
response code.
A RateLimitPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external rate limiting service.
"},{"location":"kuadrant-operator/doc/rate-limiting/#the-ratelimitpolicy-custom-resource","title":"The RateLimitPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#overview","title":"Overview","text":"The RateLimitPolicy
spec includes, basically, two parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - Limit definitions (
spec.limits
)
Each limit definition includes:
- A set of rate limits (
spec.limits.<limit-name>.rates[]
) - (Optional) A set of dynamic counter qualifiers (
spec.limits.<limit-name>.counters[]
) - (Optional) A set of route selectors, to further qualify the specific routing rules when to activate the limit (
spec.limits.<limit-name>.routeSelectors[]
) - (Optional) A set of additional dynamic conditions to activate the limit (
spec.limits.<limit-name>.when[]
)
The limit definitions (limits
) can be declared at the top-level level of the spec (with the semantics of defaults) or alternatively within explicit defaults
or overrides
blocks.
Check out Kuadrant RFC 0002 to learn more about the Well-known Attributes that can be used to define counter qualifiers (counters
) and conditions (when
)."},{"location":"kuadrant-operator/doc/rate-limiting/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: my-rate-limit-policy\nspec:\n # Reference to an existing networking resource to attach the policy to. REQUIRED.\n # It can be a Gateway API HTTPRoute or Gateway resource.\n # It can only refer to objects in the same namespace as the RateLimitPolicy.\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n\n # The limits definitions to apply to the network traffic routed through the targeted resource.\n # Equivalent to if otherwise declared within `defaults`.\n limits:\n \"my_limit\":\n # The rate limits associated with this limit definition. REQUIRED.\n # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: secod }`\n rates: [\u2026]\n\n # Counter qualifiers.\n # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.\n # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.username`.\n # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n counters: [\u2026]\n\n # Further qualification of the scpecific HTTPRouteRules within the targeted HTTPRoute that should trigger the limit.\n # Each element contains a HTTPRouteMatch object that will be used to select HTTPRouteRules that include at least one identical HTTPRouteMatch.\n # The HTTPRouteMatch part does not have to be fully identical, but the what's stated in the selector must be identically stated in the HTTPRouteRule.\n # Do not use it on RateLimitPolicies that target a Gateway.\n routeSelectors: [\u2026]\n\n # Additional dynamic conditions to trigger the limit.\n # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.\n # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n when: [\u2026]\n\n # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n # routes that lack a more specific policy attached to.\n # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.\n defaults:\n limits: {\u2026}\n\n # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n # thus also overriding any more specific policy occasionally attached to any of those routes.\n # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.\n overrides:\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#using-the-ratelimitpolicy","title":"Using the RateLimitPolicy","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"When a RateLimitPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs
field of the HTTPRoute.
The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets, by specifying the routeSelectors
field of the limit definition.
Target a HTTPRoute by setting the spec.targetRef
field of the RateLimitPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: <RateLimitPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: <HTTPRoute Name>\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"If a RateLimitPolicy targets a route defined for *.com
and another RateLimitPolicy targets another route for api.com
, the Kuadrant control plane will not merge these two RateLimitPolicies. Unless one of the policies declare an overrides set of limites, the control plane will configure to mimic the behavior of gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and limit definitions.
E.g., by default, a request coming for api.com
will be rate limited according to the rules from the RateLimitPolicy that targets the route for api.com
; while a request for other.com
will be rate limited with the rules from the RateLimitPolicy targeting the route for *.com
.
See more examples in Overlapping Gateway and HTTPRoute RateLimitPolicies.
"},{"location":"kuadrant-operator/doc/rate-limiting/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"A RateLimitPolicy that targets a Gateway can declare a block of defaults (spec.defaults
) or a block of overrides (spec.overrides
). As a standard, gateway policies that do not specify neither defaults nor overrides, act as defaults.
When declaring defaults, a RateLimitPolicy which targets a Gateway will be enforced to all HTTP traffic hitting the gateway, unless a more specific RateLimitPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referrencing the gateway as parent will be automatically covered by the default RateLimitPolicy, as well as changes in the existing HTTPRoutes.
Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempt, such as by setting safe default limits on hostnames and hostname wildcards.
Inversely, a gateway policy that specify overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.
Target a Gateway HTTPRoute by setting the spec.targetRef
field of the RateLimitPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: <RateLimitPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n defaults: # alternatively: `overrides`\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#overlapping-gateway-and-httproute-ratelimitpolicies","title":"Overlapping Gateway and HTTPRoute RateLimitPolicies","text":"Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.
Gateway RateLimitPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute RateLimitPolicy exists, in which case the HTTPRoute RateLimitPolicy prevails.
Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):
- RateLimitPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy G (defaults) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 RateLimitPolicy A will be enforced - Request to
b.toystore.com
\u2192 RateLimitPolicy B will be enforced - Request to
other.toystore.com
\u2192 RateLimitPolicy W will be enforced - Request to
other.com
(suppose a route exists) \u2192 RateLimitPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
Gateway RateLimitPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute RateLimitPolicy.
Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):
- RateLimitPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy G (overrides) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
b.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
other.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
other.com
(suppose a route exists) \u2192 RateLimitPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
"},{"location":"kuadrant-operator/doc/rate-limiting/#limit-definition","title":"Limit definition","text":"A limit will be activated whenever a request comes in and the request matches:
- any of the route rules selected by the limit (via
routeSelectors
or implicit \"catch-all\" selector), and - all of the
when
conditions specified in the limit.
A limit can define:
- counters that are qualified based on dynamic values fetched from the request, or
- global counters (implicitly, when no qualified counter is specified)
A limit is composed of one or more rate limits.
E.g.
spec:\n limits:\n \"toystore-all\":\n rates:\n\n - limit: 5000\n duration: 1\n unit: second\n\n \"toystore-api-per-username\":\n rates:\n\n - limit: 100\n duration: 1\n unit: second\n - limit: 1000\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n hostnames:\n - api.toystore.com\n\n \"toystore-admin-unverified-users\":\n rates:\n\n - limit: 250\n duration: 1\n unit: second\n routeSelectors:\n hostnames:\n - admin.toystore.com\n when:\n - selector: auth.identity.email_verified\n operator: eq\n value: \"false\"\n
Request to Rate limits enforced api.toystore.com
100rps/username or 1000rpm/username (whatever happens first) admin.toystore.com
250rps other.toystore.com
5000rps"},{"location":"kuadrant-operator/doc/rate-limiting/#route-selectors","title":"Route selectors","text":"Route selectors allow targeting sections of a HTTPRoute, by specifying sets of HTTPRouteMatches and/or hostnames that make the policy controller look up within the HTTPRoute spec for compatible declarations, and select the corresponding HTTPRouteRules and hostnames, to then build conditions that activate the policy or policy rule.
Check out Route selectors for a full description, semantics and API reference.
"},{"location":"kuadrant-operator/doc/rate-limiting/#when-conditions","title":"when
conditions","text":"when
conditions can be used to scope a limit (i.e. to filter the traffic to which a limit definition applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules via routeSelectors
.
Use when
conditions to conditionally activate limits based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames
and spec.rules.matches
fields, or in general in RateLimitPolicies that target a Gateway.
The selectors within the when
conditions of a RateLimitPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.
"},{"location":"kuadrant-operator/doc/rate-limiting/#examples","title":"Examples","text":"Check out the following user guides for examples of rate limiting services with Kuadrant:
- Simple Rate Limiting for Application Developers
- Authenticated Rate Limiting for Application Developers
- Gateway Rate Limiting for Cluster Operators
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/rate-limiting/#known-limitations","title":"Known limitations","text":" - One HTTPRoute can only be targeted by one RateLimitPolicy.
- One Gateway can only be targeted by one RateLimitPolicy.
- RateLimitPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the RateLimitPolicy.
- 2+ RateLimitPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/rate-limiting/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"Kuadrant currently does not support multiple RateLimitPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames
field, as well as HTTPRoutes that specify a hostname that is identical to a hostname specified in a listener of one of the route's parent gateways or HTTPRoutes that don't specify any hostname at all thus inheriting the hostnames from the parent gateways. In any of these cases, a maximum of one RateLimitPolicy targeting any of those resources that specify identical hostnames is allowed.
Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.
This limitation is rooted at the underlying components configured by Kuadrant for the implementation of its policies and the lack of information in the data plane regarding the exact route that honored by the API gateway in cases of conflicting hostnames.
To exemplify one way this limitation can impact deployments, consider the following topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2 \u25b2\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 RateLimitPolicy \u2502 \u2502 RateLimitPolicy \u2502\n \u2502 (policy-1) \u2502 \u2502 (policy-2) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
In the example above, with the policy-1
resource created before policy-2
, policy-2
will be enforced on all requests to app.io/bar
while policy-1
will not be enforced at all. I.e. app.io/foo
will not be rate-limited. Nevertheless, both policies will report status condition as Enforced
.
Notice the enforcement of policy-2
and no enforcement of policy-1
is the opposite behavior as the analogous problem with the Kuadrant AuthPolicy.
A different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with expectation to set default rules for the cases not covered by more specific policies. E.g.:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 RateLimitPolicy \u2502\n \u2502 \u2502 (policy-2) \u2502\n \u25bc \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 RateLimitPolicy \u2502\n \u2502 (policy-1) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Once again, both policies will report the status condition as Enforced. However, in this case, only policy-1 will be enforced on requests to app.io/foo, while policy-2 will not be enforced at all. That is, app.io/bar will not be rate-limited. This is the same behavior as the analogous problem with the Kuadrant AuthPolicy.
To avoid these problems, use different hostnames in each route.
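For example, a minimal sketch (the hostnames, gateway name and paths are illustrative) of two HTTPRoutes declaring distinct hostnames, so that each policy attaches to an unambiguous host:
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: HTTPRoute\nmetadata:\n  name: route-a\nspec:\n  parentRefs:\n  - name: mygateway\n  hostnames:\n  - foo.app.io\n  rules:\n  - matches:\n    - path:\n        value: /foo\n---\napiVersion: gateway.networking.k8s.io/v1beta1\nkind: HTTPRoute\nmetadata:\n  name: route-b\nspec:\n  parentRefs:\n  - name: mygateway\n  hostnames:\n  - bar.app.io\n  rules:\n  - matches:\n    - path:\n        value: /bar\n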
"},{"location":"kuadrant-operator/doc/rate-limiting/#implementation-details","title":"Implementation details","text":"Driven by limitations related to how Istio injects configuration in the filter chains of the ingress gateways, Kuadrant relies on Envoy's Wasm Network filter in the data plane, to manage the integration with rate limiting service (\"Limitador\"), instead of the Rate Limit filter.
Motivation: Multiple rate limit domains The first limitation comes from having only one filter chain per listener. This often leads to a single global rate limiting filter configuration per gateway, and therefore to a shared rate limit domain across applications and policies. Even though, in a rate limit filter, the triggering of rate limit calls, via actions that build so-called \"descriptors\", can be defined at the level of the virtual host and/or a specific route rule, there is only one overall rate limit configuration, i.e., always the same rate limit domain for all calls to Limitador.
On the other hand, the possibility of configuring and invoking the rate limit service for multiple domains, depending on the context, makes it possible to isolate groups of policy rules, as well as to optimize performance in the rate limit service, which can rely on the domain for indexation.
Motivation: Fine-grained matching rules A second limitation of configuring the rate limit filter via Istio, particularly from Gateway API resources, is that rate limit descriptors at the level of a specific HTTP route rule require \"named routes\" – defined only in an Istio VirtualService resource and referred to in an EnvoyFilter one. Because Gateway API HTTPRoute rules lack a \"name\" property1, and because Istio VirtualService resources are only ephemeral data structures handled in-memory by Istio in its implementation of gateway configuration for Gateway API, where the names of individual route rules are auto-generated and not referable by users in a policy23, rate limiting by attributes of the HTTP request (e.g., path, method, headers, etc.) would be very limited while depending only on Envoy's Rate Limit filter.
Motivated by the desire to support multiple rate limit domains per ingress gateway, as well as fine-grained HTTP route matching rules for rate limiting, Kuadrant implements a wasm-shim that handles the rules to invoke the rate limiting service, complying with Envoy's Rate Limit Service (RLS) protocol.
The wasm module integrates with the gateway in the data plane via the Wasm Network filter, and parses a configuration composed out of user-defined RateLimitPolicy resources by the Kuadrant control plane. The rate limiting service (\"Limitador\") remains an implementation of Envoy's RLS protocol, capable of being integrated directly via the Rate Limit extension or, by Kuadrant, via the wasm module for the Istio Gateway API implementation.
As a consequence of this design:
- Users can define fine-grained rate limit rules that match their Gateway and HTTPRoute definitions, including subsections of these.
- Rate limit definitions are isolated, not leaking across unrelated policies or applications.
- Conditions to activate limits are evaluated in the context of the gateway process, reducing gRPC calls to the external rate limiting service to only those cases where rate limit counters are known in advance to have to be checked/incremented.
- The rate limiting service can rely on the indexation to look up groups of limit definitions and counters.
- Components remain compliant with industry protocols and flexible for different integration options.
A Kuadrant wasm-shim configuration for 2 RateLimitPolicy custom resources (a Gateway default RateLimitPolicy and an HTTPRoute RateLimitPolicy) looks like the following, and is generated automatically by the Kuadrant control plane:
apiVersion: extensions.istio.io/v1alpha1\nkind: WasmPlugin\nmetadata:\n name: kuadrant-istio-ingressgateway\n namespace: istio-system\n \u2026\nspec:\n phase: STATS\n pluginConfig:\n failureMode: deny\n rateLimitPolicies:\n\n - domain: istio-system/gw-rlp # allows isolating policy rules and improve performance of the rate limit service\n hostnames:\n - '*.website'\n - '*.io'\n name: istio-system/gw-rlp\n rules: # match rules from the gateway and according to conditions specified in the policy\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /\n data:\n - static: # tells which rate limit definitions and counters to activate\n key: limit.internet_traffic_all__593de456\n value: \"1\"\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /\n - operator: endswith\n selector: request.host\n value: .io\n data:\n - static:\n key: limit.internet_traffic_apis_per_host__a2b149d2\n value: \"1\"\n - selector:\n selector: request.host\n service: kuadrant-rate-limiting-service\n - domain: default/app-rlp\n hostnames:\n - '*.toystore.website'\n - '*.toystore.io'\n name: default/app-rlp\n rules: # matches rules from a httproute and additional specified in the policy\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /assets/\n data:\n - static:\n key: limit.toystore_assets_all_domains__8cfb7371\n value: \"1\"\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /v1/\n - operator: eq\n selector: request.method\n value: GET\n - operator: endswith\n selector: request.host\n value: .toystore.website\n - operator: eq\n selector: auth.identity.username\n value: \"\"\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /v1/\n - operator: eq\n selector: request.method\n value: POST\n - operator: endswith\n selector: request.host\n value: .toystore.website\n - operator: eq\n selector: auth.identity.username\n value: \"\"\n data:\n - static:\n key: limit.toystore_v1_website_unauthenticated__3f9c40c6\n value: \"1\"\n service: kuadrant-rate-limiting-service\n selector:\n matchLabels:\n istio.io/gateway-name: istio-ingressgateway\n url: oci://quay.io/kuadrant/wasm-shim:v0.3.0\n
- https://github.com/kubernetes-sigs/gateway-api/pull/996 ↩
- https://github.com/istio/istio/issues/36790 ↩
- https://github.com/istio/istio/issues/37346 ↩
"},{"location":"kuadrant-operator/doc/tls/","title":"TLS","text":"A Kuadrant TLSPolicy custom resource:
- Targets Gateway API Gateway resources to provide TLS for gateway listeners by managing the lifecycle of TLS certificates using CertManager.
"},{"location":"kuadrant-operator/doc/tls/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/tls/#the-tlspolicy-custom-resource","title":"The TLSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/tls/#overview","title":"Overview","text":"The TLSPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
)
"},{"location":"kuadrant-operator/doc/tls/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: my-tls-policy\nspec:\n # reference to an existing networking resource to attach the policy to\n # it can only be a Gateway API Gateway resource\n # it can only refer to objects in the same namespace as the TLSPolicy\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: mygateway\n
Check out the API reference for a full specification of the TLSPolicy CRD.
"},{"location":"kuadrant-operator/doc/tls/#using-the-tlspolicy","title":"Using the TLSPolicy","text":""},{"location":"kuadrant-operator/doc/tls/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"When a TLSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a valid TLS section.
Target a Gateway by setting the spec.targetRef
field of the TLSPolicy as follows:
apiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: <TLSPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/tls/#examples","title":"Examples","text":"Check out the following user guides for examples of using the Kuadrant TLSPolicy:
"},{"location":"kuadrant-operator/doc/tls/#known-limitations","title":"Known limitations","text":""},{"location":"kuadrant-operator/doc/install/install-openshift/","title":"Install Kuadrant on an OpenShift cluster","text":"NOTE: You must perform these steps on each OpenShift cluster that you want to use Kuadrant on.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#prerequisites","title":"Prerequisites","text":" - OpenShift Container Platform 4.14.x or later with community Operator catalog available.
- AWS account with Route 53 and a hosted zone.
- Accessible Redis instance.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#procedure","title":"Procedure","text":""},{"location":"kuadrant-operator/doc/install/install-openshift/#step-1-set-up-your-environment","title":"Step 1 - Set up your environment","text":"export AWS_ACCESS_KEY_ID=xxxxxxx # Key ID from AWS with Route 53 access\nexport AWS_SECRET_ACCESS_KEY=xxxxxxx # Access key from AWS with Route 53 access\nexport REDIS_URL=redis://user:xxxxxx@some-redis.com:10340 # A Redis cluster URL\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-2-install-gateway-api-v1","title":"Step 2 - Install Gateway API v1","text":"Before you can use Kuadrant, you must install Gateway API v1 as follows:
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-3-install-and-configure-istio-with-the-sail-operator","title":"Step 3 - Install and configure Istio with the Sail Operator","text":"Kuadrant integrates with Istio as a Gateway API provider. You can set up an Istio-based Gateway API provider by using the Sail Operator.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#install-istio","title":"Install Istio","text":"To install the Istio Gateway provider, run the following commands:
kubectl create ns istio-system\n
kubectl apply -f - <<EOF\nkind: OperatorGroup\napiVersion: operators.coreos.com/v1\nmetadata:\n name: sail\n namespace: istio-system\nspec: \n upgradeStrategy: Default \n--- \napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: sailoperator\n namespace: istio-system\nspec:\n channel: 3.0-dp1\n installPlanApproval: Automatic\n name: sailoperator\n source: community-operators\n sourceNamespace: openshift-marketplace\nEOF\n
Check the status of the installation as follows:
kubectl get installplan -n istio-system -o=jsonpath='{.items[0].status.phase}'\n
When ready, the status will change from installing
to complete
.
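Alternatively, you can block until the install plan reaches that phase, as in this sketch (assumes kubectl v1.23+ for jsonpath-based waiting):
kubectl wait installplan --all -n istio-system --for=jsonpath='{.status.phase}'=Complete --timeout=300s\n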
"},{"location":"kuadrant-operator/doc/install/install-openshift/#configure-istio","title":"Configure Istio","text":"To configure the Istio Gateway API provider, run the following command:
kubectl apply -f - <<EOF\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n name: default\nspec:\n version: v1.21.0\n namespace: istio-system\n # Disable autoscaling to reduce dev resources\n values:\n pilot:\n autoscaleEnabled: false\nEOF\n
Wait for Istio to be ready as follows:
kubectl wait istio/default -n istio-system --for=\"condition=Ready=true\"\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-4-optional-configure-observability-and-metrics","title":"Step 4 - Optional: Configure observability and metrics","text":"Kuadrant provides a set of example dashboards that use known metrics exported by Kuadrant and Gateway components to provide insight into different components of your APIs and Gateways. While not essential, it is best to set up an OpenShift monitoring stack. This section provides links to OpenShift and Thanos documentation on configuring monitoring and metrics storage.
You can set up user-facing monitoring by following the steps in the OpenShift documentation on configuring the monitoring stack.
If you have user workload monitoring enabled, it is best to configure remote writes to a central storage system such as Thanos:
- OpenShift remote write configuration
- Kube Thanos
The example dashboards and alerts for observing Kuadrant functionality use low-level CPU metrics and network metrics available from the user monitoring stack in OpenShift. They also use resource state metrics from Gateway API and Kuadrant resources.
To scrape these additional metrics, you can install a kube-state-metrics instance
, with a custom resource configuration as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/kube-state-metrics.yaml\nkubectl apply -k https://github.com/Kuadrant/gateway-api-state-metrics?ref=main\n
To enable request metrics in Istio, you must create a telemetry
resource as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/telemetry.yaml\n
If you have Grafana installed in your cluster, you can import the example dashboards and alerts.
For example installation details, see installing Grafana on OpenShift. When installed, you must add your Thanos instance as a data source to Grafana. Alternatively, if you are using only the user workload monitoring stack in your OpenShift cluster, and not writing metrics to an external Thanos instance, you can set up a data source to the thanos-querier route in the OpenShift cluster.
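For example, you can look up the URL for such a data source from the thanos-querier route (assuming the default openshift-monitoring namespace):
kubectl get route thanos-querier -n openshift-monitoring -o jsonpath='{.spec.host}'\n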
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-5-create-secrets-for-your-credentials","title":"Step 5 - Create secrets for your credentials","text":"Before installing the Kuadrant Operator, you must enter the following commands to set up secrets that you will use later:
kubectl create ns kuadrant-system\n
Set up a CatalogSource
as follows:
kubectl apply -f - <<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n name: kuadrant-operator-catalog\n namespace: kuadrant-system\nspec:\n sourceType: grpc\n image: quay.io/kuadrant/kuadrant-operator-catalog:v0.8.0\n displayName: Kuadrant Operators\n publisher: grpc\n updateStrategy:\n registryPoll:\n interval: 45m\nEOF \n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#aws-route-53-credentials-for-tls","title":"AWS Route 53 credentials for TLS","text":"Set the AWS Route 53 credentials for TLS verification as follows:
kubectl -n kuadrant-system create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#redis-credentials-for-rate-limiting-counters","title":"Redis credentials for rate limiting counters","text":"Set the Redis credentials for shared multicluster counters for the Kuadrant Limitador component as follows:
kubectl -n kuadrant-system create secret generic redis-config \\\n --from-literal=URL=$REDIS_URL \n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#aws-route-53-credentials-for-dns","title":"AWS Route 53 credentials for DNS","text":"Set the AWS Route 53 credentials for managing DNS records as follows:
kubectl create ns ingress-gateway\n
kubectl -n ingress-gateway create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-6-install-the-kuadrant-operator","title":"Step 6 - Install the Kuadrant Operator","text":"To install the Kuadrant Operator, enter the following command:
kubectl apply -f - <<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: kuadrant-operator\n namespace: kuadrant-system\nspec:\n channel: stable\n installPlanApproval: Automatic\n name: kuadrant-operator\n source: kuadrant-operator-catalog\n sourceNamespace: kuadrant-system\n---\nkind: OperatorGroup\napiVersion: operators.coreos.com/v1\nmetadata:\n name: kuadrant\n namespace: kuadrant-system\nspec: \n upgradeStrategy: Default\nEOF\n
Wait for the Kuadrant Operators to be installed as follows:
kubectl get installplan -n kuadrant-system -o=jsonpath='{.items[0].status.phase}'\n
After some time, this command should return complete
.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-7-configure-kuadrant","title":"Step 7 - Configure Kuadrant","text":"To configure your Kuadrant deployment, enter the following command:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\n namespace: kuadrant-system\nspec:\n limitador:\n storage:\n redis-cached:\n configSecretRef:\n name: redis-config \nEOF \n
Wait for Kuadrant to be ready as follows:
kubectl wait kuadrant/kuadrant --for=\"condition=Ready=true\" -n kuadrant-system --timeout=300s\n
Kuadrant is now ready to use.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#next-steps","title":"Next steps","text":" - Secure, protect, and connect APIs with Kuadrant on OpenShift
"},{"location":"kuadrant-operator/doc/observability/examples/","title":"Example Dashboards and Alerts","text":"Explore a variety of starting points for monitoring your Kuadrant installation with our examples folder. These dashboards and alerts are ready-to-use and easily customizable to fit your environment.
There are some example dashboards uploaded to Grafana.com. You can use the IDs listed below to import these dashboards into Grafana:
Name ID
App Developer Dashboard 20970
Business User Dashboard 20981
Platform Engineer Dashboard 20982
"},{"location":"kuadrant-operator/doc/observability/examples/#dashboards","title":"Dashboards","text":""},{"location":"kuadrant-operator/doc/observability/examples/#importing-dashboards-into-grafana","title":"Importing Dashboards into Grafana","text":"For more details on how to import dashboards into Grafana, visit the import dashboards page.
- UI Method:
- JSON - Use the 'Import' feature in the Grafana UI to upload dashboard JSON files directly.
- ID - Use the 'Import' feature in the Grafana UI to import via Grafana.com using a Dashboard ID.
- ConfigMap Method: Automate dashboard provisioning by adding files to a ConfigMap, which should be mounted at
/etc/grafana/provisioning/dashboards
.
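For example, a sketch of creating such a ConfigMap from a local dashboard file (the file name and namespace are hypothetical):
kubectl create configmap grafana-dashboards \\\n --from-file=app-developer-dashboard.json \\\n -n my-grafana-namespace\n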
Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.
"},{"location":"kuadrant-operator/doc/observability/examples/#alerts","title":"Alerts","text":""},{"location":"kuadrant-operator/doc/observability/examples/#setting-up-alerts-in-prometheus","title":"Setting Up Alerts in Prometheus","text":"Integrate alerts into Prometheus using a PrometheusRule
resource. Adjust alert thresholds to meet your specific operational needs.
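For example, a minimal PrometheusRule sketch (the name, namespace and threshold are illustrative) that fires when more than 5% of gateway requests return 5xx responses:
apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n  name: kuadrant-example-alerts\n  namespace: monitoring\nspec:\n  groups:\n  - name: gateway.rules\n    rules:\n    - alert: HighRequestErrorRate\n      expr: sum(rate(istio_requests_total{response_code=~\"5..\"}[5m])) / sum(rate(istio_requests_total[5m])) > 0.05\n      for: 10m\n      labels:\n        severity: warning\n      annotations:\n        summary: More than 5% of gateway requests are failing with 5xx responses\n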
Further information on the metrics used for these alerts can be found on the metrics page.
"},{"location":"kuadrant-operator/doc/observability/metrics/","title":"Metrics","text":"This is a reference page for some of the different metrics used in example dashboards and alerts. It is not an exhaustive list. The documentation for each component may provide more details on a per-component basis. Some of the metrics are sourced from components outside the Kuadrant project, for example, Envoy. The value of this reference is showing some of the more widely desired metrics, and how to join the metrics from different sources together in a meaningful way.
"},{"location":"kuadrant-operator/doc/observability/metrics/#metrics-sources","title":"Metrics sources","text":" - Kuadrant components
- Istio
- Envoy
- Kube State Metrics
- Gateway API State Metrics
- Kubernetes metrics
"},{"location":"kuadrant-operator/doc/observability/metrics/#resource-usage-metrics","title":"Resource usage metrics","text":"Resource metrics, like CPU, memory and disk usage, primarily come from the Kubernetes metrics components. These include container_cpu_usage_seconds_total
, container_memory_working_set_bytes
and kubelet_volume_stats_used_bytes
. A stable list of metrics is maintained in the Kubernetes repository. These low-level metrics typically have a set of recording rules that aggregate values by labels and time ranges. For example, node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
or namespace_workload_pod:kube_pod_owner:relabel
. If you have deployed the kube-prometheus project, you should have the majority of these metrics being scraped.
"},{"location":"kuadrant-operator/doc/observability/metrics/#networking-metrics","title":"Networking metrics","text":"Low-level networking metrics like container_network_receive_bytes_total
are also available from the Kubernetes metrics components. HTTP & GRPC traffic metrics with higher level labels are available from Istio. One of the main metrics would be istio_requests_total
, which is a counter incremented for every request handled by an Istio proxy. Latency metrics are available via the istio_request_duration_milliseconds
metric, with buckets for varying response times.
Some example dashboards have panels that make use of the request URL path. The path is not added as a label to Istio metrics by default, as it has the potential to increase metric cardinality, and thus storage requirements. If you want to make use of the path in your queries or visualisations, you can enable the request path metric via the Telemetry resource in istio:
apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n name: namespace-metrics\n namespace: istio-system\nspec:\n metrics:\n\n - providers:\n - name: prometheus\n overrides:\n - match:\n metric: REQUEST_COUNT\n tagOverrides:\n request_url_path:\n value: \"request.url_path\"\n - match: \n metric: REQUEST_DURATION\n tagOverrides:\n request_url_path:\n value: \"request.url_path\"\n
"},{"location":"kuadrant-operator/doc/observability/metrics/#state-metrics","title":"State metrics","text":"The kube-state-metrics project exposes the state of various kuberenetes resources as metrics and labels. For example, the ready status
of a Pod
is available as kube_pod_status_ready
, with labels for the pod name
and namespace
. This can be useful for linking lower level container metrics back to a meaningful resource in the Kubernetes world.
"},{"location":"kuadrant-operator/doc/observability/metrics/#joining-metrics","title":"Joining metrics","text":"Metric queries can be as simple as just the name of the metric, or can be complex with joining & grouping. A lot of the time it can be useful to tie back low level metrics to more meaningful Kubernetes resources. For example, if the memory usage is maxed out on a container and that container is constantly being OOMKilled, it can be useful to get the Deployment and Namespace of that container for debugging. Prometheus query language (or promql) allows vector matching or results (sometimes called joining).
When using Gateway API and Kuadrant resources like HTTPRoute and RateLimitPolicy, the state metrics can be joined to Istio metrics to give a meaningful result set. Here's an example that queries the number of requests per second, and includes the name of the HTTPRoute that the traffic is for.
sum(\n rate(\n istio_requests_total{}[5m]\n )\n) by (destination_service_name)\n\n\n* on(destination_service_name) group_right \n label_replace(gatewayapi_httproute_labels{}, \\\"destination_service_name\\\", \\\"$1\\\",\\\"service\\\", \\\"(.+)\\\")\n
Breaking this query down, there are 2 parts. The first part is getting the rate of requests hitting the Istio gateway, aggregated to 5m intervals:
sum(\n rate(\n destination_service_name{}[5m]\n )\n) by (destination_service_name)\n
The result set here will include a label for the destination service name (i.e. the Service in Kubernetes). This label is key to looking up the HTTPRoute this traffic belongs to.
The 2nd part of the query uses the gatewayapi_httproute_labels
metric and the label_replace
function. The gatewayapi_httproute_labels
metric gives a list of all httproutes, including any labels on them. The HTTPRoute in this example has a label called 'service', set to be the same as the Istio service name. This allows us to join the 2 results set. However, because the label doesn't match exactly (destination_service_name
and service
), we can replace the label so that it does match. That's what the label_replace
does.
label_replace(gatewayapi_httproute_labels{}, \\\"destination_service_name\\\", \\\"$1\\\",\\\"service\\\", \\\"(.+)\\\")\n
The 2 parts are joined together using vector matching.
* on(destination_service_name) group_right \n
*
is the binary operator i.e. multiplication (gives join like behaviour) on()
specifies which labels to \"join\" the 2 results with group_right
enables a one to many matching.
See the Prometheus documentation for further details on matching.
"},{"location":"kuadrant-operator/doc/observability/tracing/","title":"Enabling tracing with a central collector","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#introduction","title":"Introduction","text":"This guide outlines the steps to enable tracing in Istio and Kuadrant components (Authorino and Limitador), directing traces to a central collector for improved observability and troubleshooting. We'll also explore a typical troubleshooting flow using traces and logs.
"},{"location":"kuadrant-operator/doc/observability/tracing/#prerequisites","title":"Prerequisites","text":" - A Kubernetes cluster with Istio and Kuadrant installed.
- A trace collector (e.g., Jaeger or Tempo) configured to support OpenTelemetry (OTel).
"},{"location":"kuadrant-operator/doc/observability/tracing/#configuration-steps","title":"Configuration Steps","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#istio-tracing-configuration","title":"Istio Tracing Configuration","text":"Enable tracing in Istio by using the Telemetry API. Depending on your method for installing Istio, you will need to configure a tracing extensionProvider
in your MeshConfig, Istio or IstioOperator resource as well. Here is an example Telemetry and Istio config to sample 100% of requests, if using the Istio Sail Operator.
apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n name: mesh-default\n namespace: istio-system\nspec:\n tracing:\n\n - providers:\n - name: tempo-otlp\n randomSamplingPercentage: 100\n---\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n name: default\nspec:\n namespace: istio-system\n values:\n meshConfig:\n defaultConfig:\n tracing: {}\n enableTracing: true\n extensionProviders:\n - name: tempo-otlp\n opentelemetry:\n port: 4317\n service: tempo.tempo.svc.cluster.local\n
"},{"location":"kuadrant-operator/doc/observability/tracing/#kuadrant-tracing-configuration","title":"Kuadrant Tracing Configuration","text":"The Authorino and Limitador components have request tracing capabilities. Here is an example configuration to enable and send traces to a central collector. Ensure the collector is the same one that Istio is sending traces so that they can be correlated later.
apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n tracing:\n endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n insecure: true\n---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador\nspec:\n tracing:\n endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n
Once the changes are applied, the authorino and limitador components will be redeployed tracing enabled.
Note:
There are plans to consolidate the tracing configuration to a single location i.e. the Kuadrant CR. This will eventually eliminate the need to configure tracing in both the Authorino and Limitador CRs.
Important:
Currently, trace IDs do not propagate to wasm modules in Istio/Envoy, affecting trace continuity in Limitador. This means that requests passed to limitador will not have the relavant 'parent' trace ID in its trace information. If however the trace initiation point is outside of Envoy/Istio, the 'parent' trace ID will be available to limitador and included in traces passed to the collector. This has an impact on correlating traces from limitador with traces from authorino, the gateway and any other components in the path of requests.
"},{"location":"kuadrant-operator/doc/observability/tracing/#troubleshooting-flow-using-traces-and-logs","title":"Troubleshooting Flow Using Traces and Logs","text":"Using a tracing interface like the Jaeger UI or Grafana, you can search for trace information by the trace ID. You may get the trace ID from logs, or from a header in a sample request you want to troubleshoot. You can also search for recent traces, filtering by the service you want to focus on.
Here is an example trace in the Grafana UI showing the total request time from the gateway (Istio), the time to check the curent rate limit count (and update it) in limitador and the time to check auth in Authorino:
In limitador, it is possible to enable request logging with trace IDs to get more information on requests. This requires the log level to be increased to at least debug, so the verbosity must be set to 3 or higher in the Limitador CR. For example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador\nspec:\n verbosity: 3\n
A log entry will look something like this, with the traceparent
field holding the trace ID:
\"Request received: Request { metadata: MetadataMap { headers: {\"te\": \"trailers\", \"grpc-timeout\": \"5000m\", \"content-type\": \"application/grpc\", \"traceparent\": \"00-4a2a933a23df267aed612f4694b32141-00f067aa0ba902b7-01\", \"x-envoy-internal\": \"true\", \"x-envoy-expected-rq-timeout-ms\": \"5000\"} }, message: RateLimitRequest { domain: \"default/toystore\", descriptors: [RateLimitDescriptor { entries: [Entry { key: \"limit.general_user__f5646550\", value: \"1\" }, Entry { key: \"metadata.filter_metadata.envoy\\\\.filters\\\\.http\\\\.ext_authz.identity.userid\", value: \"alice\" }], limit: None }], hits_addend: 1 }, extensions: Extensions }\"\n
If you centrally aggregate logs using something like promtail and loki, you can jump between trace information and the relevant logs for that service:
Using a combination of tracing and logs, you can visualise and troubleshoot reuqest timing issues and drill down to specific services. This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users traffic.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/","title":"RLP can target a Gateway resource","text":"Previous version: https://hackmd.io/IKEYD6NrSzuGQG1nVhwbcw
Based on: https://hackmd.io/_1k6eLCNR2eb9RoSzOZetg
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#introduction","title":"Introduction","text":"The current RateLimitPolicy CRD already implements a targetRef
with a reference to Gateway API's HTTPRoute. This doc captures the design and some implementation details of allowing the targetRef
to reference a Gateway API's Gateway.
Having in place this HTTPRoute - Gateway hierarchy, we are also considering to apply Policy Attachment's defaults/overrides approach to the RateLimitPolicy CRD. But for now, it will only be about targeting the Gateway resource.
On designing Kuadrant's rate limiting and considering Istio/Envoy's rate limiting offering, we hit two limitations (described here). Therefore, not giving up entirely in existing Envoy's RateLimit Filter, we decided to move on and leverage the Envoy's Wasm Network Filter and implement rate limiting wasm-shim module compliant with the Envoy's Rate Limit Service (RLS). This wasm-shim module accepts a PluginConfig struct object as input configuration object.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#use-cases-targeting-a-gateway","title":"Use Cases targeting a gateway","text":"A key use case is being able to provide governance over what service providers can and cannot do when exposing a service via a shared ingress gateway. As well as providing certainty that no service is exposed without my ability as a cluster administrator to protect my infrastructure from unplanned load from badly behaving clients etc.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#goals","title":"Goals","text":"The goal of this document is to define:
- The schema of this
PluginConfig
struct. - The kuadrant-operator behavior filling the
PluginConfig
struct having as input the RateLimitPolicy k8s objects - The behavior of the wasm-shim having the
PluginConfig
struct as input.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"Kuadrant's rate limit relies on the Rate Limit Service (RLS) protocol, hence the gateway generates, based on a set of actions, a set of descriptors (one descriptor is a set of descriptor entries). Those descriptors are send to the external rate limit service provider. When multiple descriptors are provided, the external service provider will limit on ALL of them and return an OVER_LIMIT response if any of them are over limit.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-crd-of-the-ratelimitpolicy","title":"Schema (CRD) of the RateLimitPolicy","text":"---\napiVersion: kuadrant.io/v1beta1\nkind: RateLimitPolicy\nmetadata:\n name: my-rate-limit-policy\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n rateLimits:\n\n - rules:\n - paths: [\"/admin/*\"]\n methods: [\"GET\"]\n hosts: [\"example.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"yes\"\n limits:\n - conditions: [\"admin == yes\"]\n max_value: 500\n seconds: 30\n variables: []\n
.spec.rateLimits
holds a list of rate limit configurations represented by the object RateLimit
. Each RateLimit
object represents a complete rate limit configuration. It contains three fields:
-
rules
(optional): Rules allow matching hosts
and/or methods
and/or paths
. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.
-
configurations
(required): Specifies a set of rate limit configurations that could be applied. The rate limit configuration object is the equivalent of the config.route.v3.RateLimit envoy object. One configuration is, in turn, a list of rate limit actions. Each action populates a descriptor entry. A vector of descriptor entries compose a descriptor. Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor. These rate limiting configuration rules provide flexibility to produce multiple descriptors. For example, you may want to define one generic rate limit descriptor and another descriptor depending on some header. If the header does not exist, the second descriptor is not generated, but traffic keeps being rate limited based on the generic descriptor.
configurations:\n\n - actions:\n - request_headers:\n header_name: \"X-MY-CUSTOM-HEADER\"\n descriptor_key: \"custom-header\"\n skip_if_absent: true\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
limits
(optional): configuration of the rate limiting service (Limitador). Check out limitador documentation for more information about the fields of each Limit
object.
Note: No namespace
/domain
defined. Kuadrant operator will figure out.
Note: There is no PREAUTH
, POSTAUTH
stage defined. Ratelimiting filter should be placed after authorization filter to enable authenticated rate limiting. In the future, stage
can be implemented.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#kuadrant-operators-behavior","title":"Kuadrant-operator's behavior","text":"One HTTPRoute can only be targeted by one rate limit policy.
Similarly, one Gateway can only be targeted by one rate limit policy.
However, indirectly, one gateway will be affected by multiple rate limit policies. It is by design of the Gateway API, one gateway can be referenced by multiple HTTPRoute objects. Furthermore, one HTTPRoute can reference multiple gateways.
The kuadrant operator will aggregate all the rate limit policies that apply for each gateway, including RLP targeting HTTPRoutes and Gateways.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#virtualhosting-ratelimitpolicies","title":"\"VirtualHosting\" RateLimitPolicies","text":"Rate limit policies are scoped by the domains defined at the referenced HTTPRoute's hostnames and Gateway's Listener's Hostname.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#multiple-httproutes-with-the-same-hostname","title":"Multiple HTTPRoutes with the same hostname","text":"When there are multiple HTTPRoutes with the same hostname, HTTPRoutes are all admitted and envoy merge the routing configuration in the same virtualhost. In these cases, the control plane has to \"merge\" the rate limit configuration into a single entry for the wasm filter.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#overlapping-httproutes","title":"Overlapping HTTPRoutes","text":"If some RLP targets a route for *.com
and other RLP targets another route for api.com
, the control plane does not do any merging. A request coming for api.com
will be rate limited with the rules from the RLP targeting the route api.com
. Also, a request coming for other.com
will be rate limited with the rules from the RLP targeting the route *.com
.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#examples","title":"examples","text":"RLP A -> HTTPRoute A (api.toystore.com
) -> Gateway G (*.com
)
RLP B -> HTTPRoute B (other.toystore.com
) -> Gateway G (*.com
)
RLP H -> HTTPRoute H (*.toystore.com
) -> Gateway G (*.com
)
RLP G -> Gateway G (*.com
)
Request 1 (api.toystore.com
) -> apply RLP A and RLP G
Request 2 (other.toystore.com
) -> apply RLP B and RLP G
Request 3 (unknown.toystore.com
) -> apply RLP H and RLP G
Request 4 (other.com
) -> apply RLP G
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-domain-limitador-namespace","title":"rate limit domain / limitador namespace","text":"The kuadrant operator will add domain
attribute of the Envoy's Rate Limit Service (RLS). It will also add the namespace
attribute of the Limitador's rate limit config. The operator will ensure that the associated actions and rate limits have a common domain/namespace.
The value of this domain/namespace seems to be related to the virtualhost for which rate limit applies.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-of-the-wasm-filter-configuration-object-the-pluginconfig","title":"Schema of the WASM filter configuration object: the PluginConfig
","text":"Currently the PluginConfig looks like this:
# The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nratelimitpolicies:\n default/toystore: # rate limit policy {NAMESPACE/NAME}\n hosts: # HTTPRoute hostnames\n\n - '*.toystore.com'\n rules: # route level actions\n - operations:\n - paths:\n - /admin/toy\n methods:\n - POST\n - DELETE\n actions:\n - generic_key:\n descriptor_value: yes\n descriptor_key: admin\n global_actions: # virtualHost level actions\n - generic_key:\n descriptor_value: yes\n descriptor_key: vhaction\n upstream_cluster: rate-limit-cluster # Limitador address reference\n domain: toystore-app # RLS protocol domain value\n
Proposed new design for the WASM filter configuration object (PluginConfig
struct):
# The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nrate_limit_policies:\n\n - name: toystore\n rate_limit_domain: toystore-app\n upstream_cluster: rate-limit-cluster\n hostnames: [\"*.toystore.com\"]\n gateway_actions:\n - rules:\n - paths: [\"/admin/toy\"]\n methods: [\"GET\"]\n hosts: [\"pets.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
Update highlights:
- [minor]
rate_limit_policies
is a list instead of a map indexed by the name/namespace. - [major] no distinction between \"rules\" and global actions
- [major] more aligned with RLS: multiple descriptors structured by \"rate limit configurations\" with matching rules
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#wasm-shim","title":"WASM-SHIM","text":"WASM filter rate limit policies are not exactly the same as user managed RateLimitPolicy custom resources. The WASM filter rate limit policies is part of the internal configuration and therefore not exposed to the end user.
At the WASM filter level, there are no route level or gateway level rate limit policies. The rate limit policies in the wasm plugin configuration may not map 1:1 to user managed RateLimitPolicy custom resources. WASM rate limit policies have an internal logical name and a set of hostnames to activate it based on the incoming request\u2019s host header.
The WASM filter builds a tree based data structure holding the rate limit policies. The longest (sub)domain match is used to select the policy to be applied. Only one policy is being applied per invocation.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-configurations","title":"rate limit configurations","text":"The WASM filter configuration object contains a list of rate limit configurations to build a list of Envoy's RLS descriptors. These configurations are defined at
rate_limit_policies[*].gateway_actions[*].configurations\n
For example:
configurations:\n\n- actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
How to read the policy:
-
Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor.
-
Each policy configuration has associated, optionally, a set of rules to match. Rules allow matching hosts
and/or methods
and/or paths
. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.
-
Each configuration object defines a list of actions. Each action may (or may not) produce a descriptor entry (descriptor list item). If an action cannot append a descriptor entry, no descriptor is generated for the configuration.
Note: The external rate limit service will be called when the gateway_actions
object produces at least one not empty descriptor.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#example","title":"example","text":"WASM filter rate limit policy for *.toystore.com
. I want some rate limit descriptors configuration only for api.toystore.com
and another set of descriptors for admin.toystore.com
. The wasm filter config would look like this:
failure_mode_deny: true\nrate_limit_policies:\n\n - name: toystore\n rate_limit_domain: toystore-app\n upstream_cluster: rate-limit-cluster\n hostnames: [\"*.toystore.com\"]\n gateway_actions:\n - configurations: # no rules. Applies to all *.toystore.com traffic\n - actions:\n - generic_key:\n descriptor_key: toystore-app\n descriptor_value: \"1\"\n - rules:\n - hosts: [\"api.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: api\n descriptor_value: \"1\"\n - rules:\n - hosts: [\"admin.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
- When a request for
api.toystore.com
hits the filter, the descriptors generated would be:
descriptor 1
(\"toystore-app\", \"1\")\n
descriptor 2 (\"api\", \"1\")\n
- When a request for
admin.toystore.com
hits the filter, the descriptors generated would be:
descriptor 1
(\"toystore-app\", \"1\")\n
descriptor 2 (\"admin\", \"1\")\n
- When a request for
other.toystore.com
hits the filter, the descriptors generated would be: descriptor 1 (\"toystore-app\", \"1\")\n
"},{"location":"kuadrant-operator/doc/reference/authpolicy/","title":"The AuthPolicy Custom Resource Definition (CRD)","text":" - AuthPolicy
- AuthPolicySpec
- AuthScheme
- AuthRuleCommon
- AuthenticationRule
- MetadataRule
- AuthorizationRule
- ResponseSpec
- SuccessResponseSpec
- SuccessResponseItem
- CallbackRule
- NamedPattern
- AuthPolicyCommonSpec
- AuthPolicyStatus
- ConditionSpec
"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicy","title":"AuthPolicy","text":"Field Type Required Description spec
AuthPolicySpec Yes The specification for AuthPolicy custom resource status
AuthPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicyspec","title":"AuthPolicySpec","text":"Field Type Required Description targetRef
PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to rules
AuthScheme No Implicit default authentication/authorization rules routeSelectors
[]RouteSelector No List of implicit default selectors of HTTPRouteRules whose matching rules activate the policy. At least one HTTPRouteRule must be selected to activate the policy. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the policy. Do not use it in policies targeting a Gateway. patterns
MapNamedPattern> No Implicit default named patterns of lists of selector
, operator
and value
tuples, to be reused in when
conditions and pattern-matching authorization rules. when
[]PatternExpressionOrRef No List of implicit default additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway. defaults
AuthPolicyCommonSpec No Explicit default definitions. This field is mutually exclusive with any of the implicit default definitions: spec.rules
, spec.routeSelectors
, spec.patterns
, spec.when
overrides
AuthPolicyCommonSpec No Atomic overrides definitions. This field is mutually exclusive with any of the implicit or explicit default definitions: spec.rules
, spec.routeSelectors
, spec.patterns
, spec.when
, spec.default
"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicycommonspec","title":"AuthPolicyCommonSpec","text":"Field Type Required Description rules
AuthScheme No Authentication/authorization rules routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the policy. At least one HTTPRouteRule must be selected to activate the policy. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the policy. Do not use it in policies targeting a Gateway. patterns
MapNamedPattern> No Named patterns of lists of selector
, operator
and value
tuples, to be reused in when
conditions and pattern-matching authorization rules. when
[]PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authscheme","title":"AuthScheme","text":"Field Type Required Description authentication
MapAuthenticationRule> No Authentication rules. At least one config MUST evaluate to a valid identity object for the auth request to be successful. If omitted or empty, anonymous access is assumed. metadata
MapMetadataRule> No Rules for fetching auth metadata from external sources. authorization
MapAuthorizationRule> No Authorization rules. All policies MUST allow access for the auth request be successful. response
ResponseSpec No Customizations to the response to the authorization request. Use it to set custom values for unauthenticated, unauthorized, and/or success access request. callbacks
MapCallbackRule> No Rules for post-authorization callback requests to external services. Triggered regardless of the result of the authorization request."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authrulecommon","title":"AuthRuleCommon","text":"Field Type Required Description routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the auth rule. At least one HTTPRouteRule must be selected to activate the auth rule. If omitted, the auth rule is activated at all requests where the policy is enforced. Do not use it in policies targeting a Gateway. when
[]PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the auth rule. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway. cache
Caching spec No Caching options for the resolved object returned when applying this auth rule. (Default: disabled) priority
Integer No Priority group of the auth rule. All rules in the same priority group are evaluated concurrently; consecutive priority groups are evaluated sequentially. (Default: 0
) metrics
Boolean No Whether the auth rule emits individual observability metrics. (Default: false
)"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authenticationrule","title":"AuthenticationRule","text":"Field Type Required Description apiKey
API Key authentication spec No Authentication based on API keys stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. kubernetesTokenReview
KubernetesTokenReview spec No Authentication by Kubernetes token review. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. jwt
JWT verification spec No Authentication based on JSON Web Tokens (JWT). Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. oauth2Introspection
OAuth2 Token Introscpection spec No Authentication by OAuth2 token introspection. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. x509
X.509 authentication spec No Authentication based on client X.509 certificates. The certificates presented by the clients must be signed by a trusted CA whose certificates are stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. plain
Plain identity object spec No Identity object extracted from the context. Use this method when authentication is performed beforehand by a proxy and the resulting object passed to Authorino as JSON in the auth request. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. anonymous
Anonymous access No Anonymous access. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. credentials
Auth credentials spec No Customizations to where credentials are required to be passed in the request for authentication based on this auth rule. Defaults to HTTP Authorization header with prefix \"Bearer\". overrides
Identity extension spec No JSON overrides to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). defaults
Identity extension spec No JSON defaults to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#metadatarule","title":"MetadataRule","text":"Field Type Required Description http
HTTP GET/GET-by-POST external metadata spec No External source of auth metadata via HTTP request. Use one of: http
, userInfo
, uma
. userInfo
OIDC UserInfo spec No OpenID Connect UserInfo linked to an OIDC authentication rule declared in this same AuthPolicy. Use one of: http
, userInfo
, uma
. uma
UMA metadata spec No User-Managed Access (UMA) source of resource data. Use one of: http
, userInfo
, uma
. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authorizationrule","title":"AuthorizationRule","text":"Field Type Required Description patternMatching
Pattern-matching authorization spec No Pattern-matching authorization rules. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. opa
OPA authorization spec No Open Policy Agent (OPA) Rego policy. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. kubernetesSubjectAccessReview
Kubernetes SubjectAccessReview spec No Authorization by Kubernetes SubjectAccessReview. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. spicedb
SpiceDB authorization spec No Authorization decision delegated to external Authzed/SpiceDB server. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#responsespec","title":"ResponseSpec","text":"Field Type Required Description unauthenticated
Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthenticated. (Default: 401 Unauthorized
) unauthorized
Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthorized. (Default: 403 Forbidden
) success
SuccessResponseSpec No Response items to be included in the auth response when the request is authenticated and authorized."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponsespec","title":"SuccessResponseSpec","text":"Field Type Required Description headers
MapSuccessResponseItem> No Custom success response items wrapped as HTTP headers to be injected in the request. dynamicMetadata
MapSuccessResponseItem> No Custom success response items wrapped as Envoy Dynamic Metadata. Use it to pass data along to other proxy filters, such as the rate-limit filter."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponseitem","title":"SuccessResponseItem","text":"Field Type Required Description plain
Plain text response item No Plain text content. Use one of: plain
, json
, wristband
. json
JSON injection response item No Specification of a JSON object. Use one of: plain
, json
, wristband
. wristband
Festival Wristband token response item No Specification of a JSON object. Use one of: plain
, json
, wristband
. key
String No The key used to add the custom response item (name of the HTTP header or root property of the Dynamic Metadata object). Defaults to the name of the response item if omitted."},{"location":"kuadrant-operator/doc/reference/authpolicy/#callbackrule","title":"CallbackRule","text":"Field Type Required Description http
HTTP endpoints callback spec No HTTP endpoint settings to build the callback request (webhook). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#namedpattern","title":"NamedPattern","text":"Field Type Required Description selector
String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value
, using the operator
. operator
String Yes The binary operator to be applied to the resolved value specified by the selector. One of: eq
(equal to), neq
(not equal to), incl
(includes; for arrays), excl
(excludes; for arrays), matches
(regex). value
String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicystatus","title":"AuthPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define the status of the resource."},{"location":"kuadrant-operator/doc/reference/authpolicy/#conditionspec","title":"ConditionSpec","text":" - The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
- The message field is a human-readable message indicating details about the transition.
- The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
- The status field is a string, with possible values True, False, and Unknown.
- The type field is a string with the following possible values:
- Available: the resource has been successfully configured;
Field Type Description type
String Condition Type status
String Status: True, False, Unknown reason
String Condition state reason message
String Condition state description lastTransitionTime
Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/dnspolicy/","title":"The DNSPolicy Custom Resource Definition (CRD)","text":" - DNSPolicy
- DNSPolicySpec
- HealthCheckSpec
- LoadBalancingSpec
- LoadBalancingWeighted
- CustomWeight
- LoadBalancingGeo
- DNSPolicyStatus
- HealthCheckStatus
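For orientation, a minimal sketch of a DNSPolicy combining the types documented below (the gateway name, geo code and health check values are illustrative, not defaults):

apiVersion: kuadrant.io/v1alpha1
kind: DNSPolicy
metadata:
  name: prod-web
spec:
  targetRef:                       # DNSPolicySpec.targetRef
    group: gateway.networking.k8s.io
    kind: Gateway
    name: prod-web
  routingStrategy: loadbalanced    # immutable; "loadbalanced" requires loadBalancing
  loadBalancing:                   # LoadBalancingSpec
    weighted:                      # LoadBalancingWeighted
      defaultWeight: 120
    geo:                           # LoadBalancingGeo
      defaultGeo: US
  healthCheck:                     # HealthCheckSpec
    endpoint: /healthz
    port: 443
    protocol: HTTPS
    failureThreshold: 5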
"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicy","title":"DNSPolicy","text":"Field Type Required Description spec
DNSPolicySpec Yes The specification for DNSPolicy custom resource status
DNSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicyspec","title":"DNSPolicySpec","text":"Field Type Required Description targetRef
Gateway API PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to healthCheck
HealthCheckSpec No HealthCheck spec loadBalancing
LoadBalancingSpec Yes (loadbalanced only) LoadBalancing Spec, required when routingStrategy is \"loadbalanced\" routingStrategy
String (immutable) Yes Immutable! Routing Strategy to use, one of \"simple\" or \"loadbalanced\""},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description endpoint
String Yes Endpoint is the path to append to the host to reach the expected health check port
Number Yes Port to connect to the host on protocol
String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold
Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingspec","title":"LoadBalancingSpec","text":"Field Type Required Description weighted
LoadBalancingWeighted Yes Weighted routing spec geo
LoadBalancingGeo Yes Geo routing spec"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingweighted","title":"LoadBalancingWeighted","text":"Field Type Required Description defaultWeight
Number Yes Default weight to apply to created records custom
[]CustomWeight No Custom weights to manipulate records weights based on label selectors"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#customweight","title":"CustomWeight","text":"Field Type Description selector
metav1.LabelSelector Label Selector to specify resources that should have this weight applied weight
Number Weight value to apply for matching resources"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancinggeo","title":"LoadBalancingGeo","text":"Field Type Required Description defaultGeo
String Yes Default geo to apply to records"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicystatus","title":"DNSPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]Kubernetes meta/v1.Condition List of conditions that define the status of the resource. healthCheck
HealthCheckStatus HealthCheck status. recordConditions
[String][]Kubernetes meta/v1.Condition Status of individual DNSRecords owned by this policy."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions
[]Kubernetes meta/v1.Condition List of conditions that define the status of the resource."},{"location":"kuadrant-operator/doc/reference/kuadrant/","title":"The Kuadrant Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrant","title":"kuadrant","text":"Note on Limitador The Kuadrant operator creates a Limitador CR named `limitador` in the same namespace as the Kuadrant CR. If there is a pre-existing Limitador CR of the same name, the Kuadrant operator will take ownership of that Limitador CR. Field Type Required Description spec
KuadrantSpec No The specification for Kuadrant custom resource. status
KuadrantStatus No The status for the custom resources."},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantspec","title":"KuadrantSpec","text":"Field Type Required Description limitador
Limitador No Configure limitador deployments."},{"location":"kuadrant-operator/doc/reference/kuadrant/#limitador","title":"Limitador","text":"Field Type Required Description affinity
Affinity No Describes the scheduling rules for limitador pods. replicas
Number No Sets the number of limitador replicas to deploy. resourceRequirements
ResourceRequirements No Set the resource requirements for limitador pods. pdb
PodDisruptionBudgetType No Configure allowed pod disruption budget fields. storage
Storage No Define backend storage option for limitador."},{"location":"kuadrant-operator/doc/reference/kuadrant/#poddisruptionbudgettype","title":"PodDisruptionBudgetType","text":"Field Type Required Description maxUnavailable
Number No An eviction is allowed if at most \"maxUnavailable\" limitador pods are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\". minAvailable
Number No An eviction is allowed if at least \"minAvailable\" limitador pods will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\"."},{"location":"kuadrant-operator/doc/reference/kuadrant/#storage","title":"Storage","text":"Field Type Required Description redis
Redis No Uses Redis to store limitador counters. redis-cached
RedisCached No Uses Redis to store limitador counters, with an in-memory cache. disk
Disk No Counters are held on disk (persistent). Kubernetes Persistent Volumes will be used to store counters."},{"location":"kuadrant-operator/doc/reference/kuadrant/#redis","title":"Redis","text":"Field Type Required Description configSecretRef
LocalObjectReference No ConfigSecretRef refers to the secret holding the URL for Redis."},{"location":"kuadrant-operator/doc/reference/kuadrant/#rediscached","title":"RedisCached","text":"Field Type Required Description configSecretRef
LocalObjectReference No ConfigSecretRef refers to the secret holding the URL for Redis. options
Options No Configures a number of caching options for limitador."},{"location":"kuadrant-operator/doc/reference/kuadrant/#options","title":"Options","text":"Field Type Required Description ttl
Number No TTL for cached counters in milliseconds [default: 5000] ratio
Number No Ratio to apply to the TTL from Redis on cached counters [default: 10] flush-period
Number No FlushPeriod for counters in milliseconds [default: 1000] max-cached
Number No MaxCached refers to the maximum amount of counters cached [default: 10000]"},{"location":"kuadrant-operator/doc/reference/kuadrant/#disk","title":"Disk","text":"Field Type Required Description persistentVolumeClaim
PVCGenericSpec No Configure resources for PVC. optimize
String No Defines optimization option of the disk persistence type. Valid options: \"throughput\", \"disk\""},{"location":"kuadrant-operator/doc/reference/kuadrant/#pvcgenericspec","title":"PVCGenericSpec","text":"Field Type Required Description storageClassName
String No Storage class name resources
PersistentVolumeClaimResources No Resources represent the minimum resources the volume should have volumeName
String No VolumeName is the binding reference to the PersistentVolume backing the claim"},{"location":"kuadrant-operator/doc/reference/kuadrant/#persistentvolumeclaimresources","title":"PersistentVolumeClaimResources","text":"Field Type Required Description requests
Quantity Yes Storage resources requests to be used on the persistentVolumeClaim"},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantstatus","title":"KuadrantStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define the status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/","title":"The RateLimitPolicy Custom Resource Definition (CRD)","text":" - RateLimitPolicy
- RateLimitPolicySpec
- RateLimitPolicyCommonSpec
- Limit
- RateLimit
- WhenCondition
- RateLimitPolicyStatus
- ConditionSpec
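For orientation, a minimal sketch of a RateLimitPolicy combining the types documented below (the route name, limit name, selectors and values are illustrative):

apiVersion: kuadrant.io/v1beta2
kind: RateLimitPolicy
metadata:
  name: toystore
spec:
  targetRef:
    group: gateway.networking.k8s.io
    kind: HTTPRoute
    name: toystore
  limits:
    "create-toy":                  # Limit
      rates:
      - limit: 5                   # RateLimit: at most 5 requests
        duration: 10               # per period of 10
        unit: second               # seconds
      counters:
      - auth.identity.username     # a separate counter per distinct username
      when:                        # WhenCondition
      - selector: request.method
        operator: eq
        value: POST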
"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicy","title":"RateLimitPolicy","text":"Field Type Required Description spec
RateLimitPolicySpec Yes The specification for RateLimitPolicy custom resource status
RateLimitPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicyspec","title":"RateLimitPolicySpec","text":"Field Type Required Description targetRef
PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to defaults
RateLimitPolicyCommonSpec No Default limit definitions. This field is mutually exclusive with the limits
field overrides
RateLimitPolicyCommonSpec No Overrides limit definitions. This field is mutually exclusive with the limits
field and defaults
field. This field is only allowed for policies targeting Gateway
in targetRef.kind
limits
Map<String: Limit> No Limit definitions. This field is mutually exclusive with the defaults
field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicycommonspec","title":"RateLimitPolicyCommonSpec","text":"Field Type Required Description limits
Map<String: Limit> No Explicit Limit definitions. This field is mutually exclusive with RateLimitPolicySpec limits
field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#limit","title":"Limit","text":"Field Type Required Description rates
[]RateLimit No List of rate limits associated with the limit definition counters
[]String No List of rate limit counter qualifiers. Items must be a valid Well-known attribute. Each distinct value resolved in the data plane starts a separate counter for each rate limit. routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the limit. At least one HTTPRouteRule must be selected to activate the limit. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the limit. Do not use it in policies targeting a Gateway. when
[]WhenCondition No List of additional dynamic conditions (expressions) to activate the limit. All expressions must evaluate to true for the limit to be applied. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimit","title":"RateLimit","text":"Field Type Required Description limit
Number Yes Maximum value allowed within the given period of time (duration) duration
Number Yes The period of time in the specified unit that the limit applies unit
String Yes Unit of time for the duration of the limit. One-of: \"second\", \"minute\", \"hour\", \"day\"."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#whencondition","title":"WhenCondition","text":"Field Type Required Description selector
String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value
, using the operator
. operator
String Yes The binary operator to be applied to the resolved value specified by the selector. One-of: \"eq\" (equal to), \"neq\" (not equal to) value
String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicystatus","title":"RateLimitPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define the status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#conditionspec","title":"ConditionSpec","text":" - The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
- The message field is a human-readable message indicating details about the transition.
- The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
- The status field is a string, with possible values True, False, and Unknown.
- The type field is a string with the following possible values:
- Available: the resource has been successfully configured;
Field Type Description type
String Condition Type status
String Status: True, False, Unknown reason
String Condition state reason message
String Condition state description lastTransitionTime
Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/route-selectors/","title":"Route selectors","text":"The route selectors of a policy spec or policy rule (limit definition or auth rule) allow you to specify selectors of routes, or parts of a route, that transitively induce a set of conditions for a policy or policy rule to be enforced. A route selector is defined as a set of HTTP route matching rules, where these matching rules must exist, stated partially or identically, within the HTTPRouteRules of the HTTPRoute that is targeted by the policy.
"},{"location":"kuadrant-operator/doc/reference/route-selectors/#the-routeselectors-field","title":"The routeSelectors
field","text":"The routeSelectors
field can be found in policy specs and policy rules (limit definition or auth rule).
Field Type Required Description routeSelectors
[]RouteSelector No List of route selectors of HTTPRouteRules whose HTTPRouteMatches activate the policy or policy rule."},{"location":"kuadrant-operator/doc/reference/route-selectors/#routeselector","title":"RouteSelector","text":"Each RouteSelector
is an object composed of a set of HTTPRouteMatch objects (from Gateway API), and an additional hostnames
field.
Field Type Required Description matches
[]HTTPRouteMatch No List of selectors of HTTPRouteRules whose matching rules activate the policy or policy rule hostnames
[]Hostname No List of hostnames of the HTTPRoute that activate the policy or policy rule"},{"location":"kuadrant-operator/doc/reference/route-selectors/#mechanics-of-the-route-selectors","title":"Mechanics of the route selectors","text":"Route selectors matches and the HTTPRoute's HTTPRouteMatches are pairwise compared to select or not select HTTPRouteRules that should activate a policy rule. To decide whether the route selector selects a HTTPRouteRule or not, for each pair of route selector HTTPRouteMatch and HTTPRoute HTTPRouteMatch:
- The route selector selects the HTTPRoute's HTTPRouteRule if the HTTPRouteRule contains at least one HTTPRouteMatch that specifies fields that are literally identical to all the fields specified by at least one HTTPRouteMatch of the route selector.
- A HTTPRouteMatch within a HTTPRouteRule may include other fields that are not specified in a route selector match, and yet the route selector match selects the HTTPRouteRule if all fields of the route selector match are identically included in the HTTPRouteRule's HTTPRouteMatch; the opposite is NOT true.
- Each field
path
of a HTTPRouteMatch, as well as each field method
of a HTTPRouteMatch, as well as each element of the fields headers
and queryParams
of a HTTPRouteMatch, is atomic \u2013 this is true for the HTTPRouteMatches within a HTTPRouteRule, as well as for HTTPRouteMatches of a route selector.
Additionally, at least one hostname specified in a route selector must identically match one of the hostnames specified (or inherited, when omitted) by the targeted HTTPRoute.
The semantics of the route selectors allows policy rule definitions to be assertively related to routing rules, which helps identify the subsets of the network covered by a policy rule, prevents unreachable definitions, and avoids the overhead of maintaining such rules across multiple resources over time as the underlying network topology changes. Moreover, because a route selector does not have to be a full copy of the targeted HTTPRouteRule matches, but only partially identical, it prevents repetition to some degree and makes it easier to define policy rules that scope across multiple HTTPRouteRules (by specifying fewer rules in the selector).
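To make these selection mechanics concrete, a sketch (hostnames and paths are illustrative): the selector below selects the HTTPRouteRule shown under it, because every field stated by the selector is identically stated in the rule; the extra method field in the rule does not prevent the selection:

# route selector in a policy rule (e.g. a limit definition)
routeSelectors:
- matches:
  - path:
      type: PathPrefix
      value: "/toys"
  hostnames:
  - api.toystore.com

# HTTPRouteRule in the targeted HTTPRoute: selected, despite the extra method field
rules:
- matches:
  - path:
      type: PathPrefix
      value: "/toys"
    method: GET
  backendRefs:
  - name: toystore
    port: 80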
"},{"location":"kuadrant-operator/doc/reference/route-selectors/#golden-rules-and-corner-cases","title":"Golden rules and corner cases","text":"A few rules and corner cases to keep in mind while using the RLP's routeSelectors
:
- The golden rule \u2013 The route selectors in a policy or policy rule are not to be interpreted as the route matching rules that activate the policy or policy rule, but as selectors of the route rules that activate the policy or policy rule.
- Due to (1) above, this can lead to cases, e.g., where a route selector that states
matches: [{ method: POST }]
selects a HTTPRouteRule that defines matches: [{ method: POST }, { method: GET }]
, effectively causing the policy or policy rule to be activated on requests to the HTTP method POST
, but also to the HTTP method GET
. - The requirement for the route selector match to state patterns that are identical to the patterns stated by the HTTPRouteRule (partially or entirely) makes, e.g., a route selector such as
matches: { path: { type: PathPrefix, value: /foo } }
to select a HTTPRouteRule that defines matches: { path: { type: PathPrefix, value: /foo }, method: GET }
, but not to select a HTTPRouteRule that only defines matches: { method: GET }
, even though the latter technically includes all HTTP paths; nor does it select a HTTPRouteRule that only defines matches: { path: { type: Exact, value: /foo } }
, even though all requests to the exact path /foo
are also technically requests to /foo*
. - The atomicity property of fields of the route selectors makes, e.g., a route selector such as
matches: { path: { value: /foo } }
to select a HTTPRouteRule that defines matches: { path: { value: /foo } }
, but not to select a HTTPRouteRule that only defines matches: { path: { type: PathPrefix, value: /foo } }
. (This case may actually never happen because PathPrefix
is the default value for path.type
and will be set automatically by the Kubernetes API server.)
Because route selectors define pointers to HTTPRouteRules, the routeSelectors
field is not supported in a RLP that targets a Gateway resource.
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/","title":"The TLSPolicy Custom Resource Definition (CRD)","text":" - TLSPolicy
- TLSPolicySpec
- TLSPolicyStatus
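For orientation, a minimal sketch of a TLSPolicy (the gateway and issuer names are illustrative):

apiVersion: kuadrant.io/v1alpha1
kind: TLSPolicy
metadata:
  name: prod-web
spec:
  targetRef:
    group: gateway.networking.k8s.io
    kind: Gateway
    name: prod-web
  issuerRef:                 # CertManager ObjectReference
    group: cert-manager.io
    kind: ClusterIssuer
    name: selfsigned-cluster-issuer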
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicy","title":"TLSPolicy","text":"Field Type Required Description spec
TLSPolicySpec Yes The specification for TLSPolicy custom resource status
TLSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicyspec","title":"TLSPolicySpec","text":"Field Type Required Description targetRef
Gateway API PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to issuerRef
CertManager meta/v1.ObjectReference Yes IssuerRef is a reference to the issuer for the created certificate commonName
String No CommonName is a common name to be used on the created certificate duration
Kubernetes meta/v1.Duration No The requested 'duration' (i.e. lifetime) of the created certificate. renewBefore
Kubernetes meta/v1.Duration No How long before the currently issued certificate's expiry cert-manager should renew the certificate. usages
[]CertManager v1.KeyUsage No Usages is the set of x509 usages that are requested for the certificate. Defaults to digital signature
and key encipherment
if not specified revisionHistoryLimit
Number No RevisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history privateKey
CertManager meta/v1.CertificatePrivateKey No Options to control private keys used for the Certificate IssuerRef certmanmetav1.ObjectReference
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicystatus","title":"TLSPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]Kubernetes meta/v1.Condition List of conditions that define the status of the resource."},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/","title":"Enforcing authentication & authorization with Kuadrant AuthPolicy","text":"This guide walks you through the process of setting up a local Kubernetes cluster with Kuadrant where you will protect Gateway API endpoints by declaring Kuadrant AuthPolicy custom resources.
Two AuthPolicies will be declared:
Use case AuthPolicy App developer 1 AuthPolicy targeting a HTTPRoute that routes traffic to a sample Toy Store application, and enforces API key authentication to all requests in this route, as well as requires API key owners to be mapped to groups:admins
metadata to access a specific HTTPRouteRule of the route. Platform engineer use-case 1 AuthPolicy targeting the istio-ingressgateway
Gateway that enforces a trivial \"deny-all\" policy that locks down any other HTTPRoute attached to the Gateway. Topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (AuthPolicy) \u2502\n \u2502 gw-auth \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u25bc\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Gateway) \u2502\n \u2502 istio-ingressgateway \u2502\n \u250c\u2500\u2500\u2500\u2500\u25ba\u2502 \u2502\u25c4\u2500\u2500\u2500\u2510\n \u2502 \u2502 * \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (HTTPRoute) \u2502 \u2502 (HTTPRoute) \u2502\n \u2502 toystore \u2502 \u2502 other \u2502\n \u2502 \u2502 \u2502 \u2502\n \u2502 api.toystore.com \u2502 \u2502 *.other-apps.com \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (AuthPolicy) \u2502\n \u2502 toystore \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#requisites","title":"Requisites","text":" - Docker
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#run-the-guide-1-4","title":"Run the guide \u2460 \u2192 \u2463","text":""},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#1-setup-persona-cluster-admin","title":"\u2460 Setup (Persona: Cluster admin)","text":"Clone the repo:
git clone git@github.com:Kuadrant/kuadrant-operator.git && cd kuadrant-operator\n
Run the following command to create a local Kubernetes cluster with Kind, install & deploy Kuadrant:
make local-setup\n
Request an instance of Kuadrant in the kuadrant-system
namespace:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#2-deploy-the-toy-store-sample-application-persona-app-developer","title":"\u2461 Deploy the Toy Store sample application (Persona: App developer)","text":"kubectl apply -f examples/toystore/toystore.yaml\n\nkubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/cars\"\n - method: GET\n path:\n type: PathPrefix\n value: \"/dolls\"\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Send requests to the application unprotected:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/dolls -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#3-protect-the-toy-store-application-persona-app-developer","title":"\u2462 Protect the Toy Store application (Persona: App developer)","text":"Create the AuthPolicy to enforce the following auth rules:
- Authentication:
- All users must present a valid API key
- Authorization:
/admin*
routes require user mapped to the admins
group (kuadrant.io/groups=admins
annotation added to the Kubernetes API key Secret)
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-authn\":\n apiKey:\n selector: {}\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"only-admins\":\n opa:\n rego: |\n groups := split(object.get(input.auth.identity.metadata.annotations, \"kuadrant.io/groups\", \"\"), \",\")\n allow { groups[_] == \"admins\" }\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\nEOF\n
Create the API keys:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-regular-user\n labels:\n authorino.kuadrant.io/managed-by: authorino\nstringData:\n api_key: iamaregularuser\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-admin-user\n labels:\n authorino.kuadrant.io/managed-by: authorino\n annotations:\n kuadrant.io/groups: admins\nstringData:\n api_key: iamanadmin\ntype: Opaque\nEOF\n
Send requests to the application protected by Kuadrant:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 401 Unauthorized\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 403 Forbidden\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamanadmin' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#4-create-a-default-deny-all-policy-at-the-level-of-the-gateway-persona-platform-engineer","title":"\u2463 Create a default \"deny-all\" policy at the level of the gateway (Persona: Platform engineer)","text":"Create the policy:
kubectl -n istio-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: gw-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: istio-ingressgateway\n rules:\n authorization:\n deny-all:\n opa:\n rego: \"allow = false\"\n response:\n unauthorized:\n headers:\n \"content-type\":\n value: application/json\n body:\n value: |\n {\n \"error\": \"Forbidden\",\n \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n }\nEOF\n
The policy won't be effective until there is at least one accepted route not yet protected by another more specific policy attached to it.
Create a route that will inherit the default policy attached to the gateway:
kubectl apply -f -<<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: other\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.other-apps.com\"\nEOF\n
Send requests to the route protected by the default policy set at the level of the gateway:
curl -H 'Host: foo.other-apps.com' http://$GATEWAY_URL/ -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/","title":"Authenticated Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure authenticated rate limiting for an application using Kuadrant.
Authenticated rate limiting rate limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:
- User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
- Each user can send up to 20rpm (\"request per minute\").
- Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.
In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy
, to mimic an operation of reading toy records.
We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.
User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#run-the-steps-1-4","title":"Run the steps \u2460 \u2192 \u2463","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Create the deployment:
kubectl apply -f examples/toystore/toystore.yaml\n
Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - path:\n type: Exact\n value: \"/toy\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Verify the route works:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#3-enforce-authentication-on-requests-to-the-toy-store-api","title":"\u2462 Enforce authentication on requests to the Toy Store API","text":"Create a Kuadrant AuthPolicy
to configure the authentication:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels:\n app: toystore\n allNamespaces: true\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
Verify the authentication works by sending a request to the Toy Store API without API key:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n
Create API keys for users alice
and bob
to authenticate:
Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.
kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: bob-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: bob\nstringData:\n api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: alice-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: alice\nstringData:\n api_key: IAMALICE\ntype: Opaque\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#4-enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2463 Enforce authenticated rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"alice-limit\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n \"bob-limit\":\n rates:\n - limit: 2\n duration: 10\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: bob\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
Verify the rate limiting works by sending requests as Alice and Bob.
Up to 5 successful (200 OK
) requests every 10 seconds allowed for Alice, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Up to 2 successful (200 OK
) requests every 10 seconds allowed for Bob, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/","title":"Authenticated Rate Limiting with JWTs and Kubernetes RBAC","text":"This user guide walks you through an example of how to use Kuadrant to protect an application with policies to enforce:
- authentication based OpenId Connect (OIDC) ID tokens (signed JWTs), issued by a Keycloak server;
- alternative authentication method by Kubernetes Service Account tokens;
- authorization delegated to Kubernetes RBAC system;
- rate limiting by user ID.
In this example, we will protect a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request.
The API listens to requests at the hostnames *.toystore.com
, where it exposes the endpoints GET /toy*
, POST /admin/toy
and DELETE /amind/toy
, respectively, to mimic operations of reading, creating, and deleting toy records.
Any authenticated user/service account can send requests to the Toy Store API, by providing either a valid Keycloak-issued access token or Kubernetes token.
Privileges to execute the requested operation (read, create or delete) will be granted according to the following RBAC rules, stored in the Kubernetes authorization system:
Operation Endpoint Required role Read GET /toy*
toystore-reader
Create POST /admin/toy
toystore-write
Delete DELETE /admin/toy
toystore-write
Each user will be entitled to a maximum of 5rp10s (5 requests every 10 seconds).
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#requirements","title":"Requirements","text":" - Docker
- kubectl command-line tool
- jq
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#run-the-guide-1-6","title":"Run the guide \u2460 \u2192 \u2465","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#1-setup-a-cluster-with-kuadrant","title":"\u2460 Setup a cluster with Kuadrant","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Deploy the application in the default
namespace:
kubectl apply -f examples/toystore/toystore.yaml\n
Route traffic to the application:
kubectl apply -f examples/toystore/httproute.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#api-lifecycle","title":"API lifecycle","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-unprotected","title":"Try the API unprotected","text":"Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
It should return 200 OK
.
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#3-deploy-keycloak","title":"\u2462 Deploy Keycloak","text":"Create the namesapce:
kubectl create namespace keycloak\n
Deploy Keycloak with a bootstrap realm, users, and clients:
kubectl apply -n keycloak -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Note: The Keycloak server may take a couple of minutes to be ready.
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#4-enforce-authentication-and-authorization-for-the-toy-store-api","title":"\u2463 Enforce authentication and authorization for the Toy Store API","text":"Create a Kuadrant AuthPolicy
to configure authentication and authorization:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore-protection\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"keycloak-users\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n \"k8s-service-accounts\":\n kubernetesTokenReview:\n audiences:\n\n - https://kubernetes.default.svc.cluster.local\n overrides:\n \"sub\":\n selector: auth.identity.user.username\n authorization:\n \"k8s-rbac\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.sub\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.sub\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-missing-authentication","title":"Try the API missing authentication","text":"curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-users\"\n# www-authenticate: Bearer realm=\"k8s-service-accounts\"\n# x-ext-auth-reason: {\"k8s-service-accounts\":\"credential not found\",\"keycloak-users\":\"credential not found\"}\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-without-permission","title":"Try the API without permission","text":"Obtain an access token with the Keycloak server:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
Send a request to the API as the Keycloak-authenticated user while still missing permissions:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
Create a Kubernetes Service Account to represent a consumer of the API associated with the alternative source of identities k8s-service-accounts
:
kubectl apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: client-app-1\nEOF\n
Obtain an access token for the client-app-1
service account:
SA_TOKEN=$(kubectl create token client-app-1)\n
Send a request to the API as the service account while still missing permissions:
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#5-grant-access-to-the-toy-store-api-for-user-and-service-account","title":"\u2464 Grant access to the Toy Store API for user and service account","text":"Create the toystore-reader
and toystore-writer
roles:
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: toystore-reader\nrules:\n\n- nonResourceURLs: [\"/toy*\"]\n verbs: [\"get\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: toystore-writer\nrules:\n- nonResourceURLs: [\"/admin/toy\"]\n verbs: [\"post\", \"delete\"]\nEOF\n
Add permissions to the user and service account:
User Kind Roles john User registered in Keycloak toystore-reader
, toystore-writer
client-app-1 Kuberentes Service Account toystore-reader
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: toystore-readers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: toystore-reader\nsubjects:\n\n- kind: User\n name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\n- kind: ServiceAccount\n name: client-app-1\n namespace: default\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: toystore-writers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: toystore-writer\nsubjects:\n- kind: User\n name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\nEOF\n
Q: Can I use Roles
and RoleBindings
instead of ClusterRoles
and ClusterRoleBindings
? Yes, you can.
The example above is for non-resource URL Kubernetes roles. For using Roles
and RoleBindings
instead of ClusterRoles
and ClusterRoleBindings
, thus more flexible resource-based permissions to protect the API, see the spec for Kubernetes SubjectAccessReview authorization in the Authorino docs.
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-with-permission","title":"Try the API with permission","text":"Send requests to the API as the Keycloak-authenticated user:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 200 OK\n
Send requests to the API as the Kubernetes service account:
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#6-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2465 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"per-user\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n counters:\n - metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-rate-limited","title":"Try the API rate limited","text":"Each user should be entitled to a maximum of 5 requests every 10 seconds.
Note: If the tokens have expired, you may need to refresh them first.
Send requests as the Keycloak-authenticated user:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Send requests as the Kubernetes service account:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/","title":"Gateway DNS for Cluster Operators","text":"This user guide walks you through an example of how to configure DNS for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#requisites","title":"Requisites","text":" - Docker
- Rout53 Hosted Zone
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#setup","title":"Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Create a namespace:
kubectl create namespace my-gateways\n
Export a root domain and hosted zone id:
export ROOT_DOMAIN=<ROOT_DOMAIN>\nexport AWS_HOSTED_ZONE_ID=<AWS_HOSTED_ZONE_ID>\n
Note: ROOT_DOMAIN and AWS_HOSTED_ZONE_ID should be set to your AWS hosted zone name and id respectively.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#create-a-managedzone","title":"Create a ManagedZone","text":"Create AWS credentials secret
export AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY>\n\nkubectl -n my-gateways create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
Create a ManagedZone
kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: $ROOT_DOMAIN\nspec:\n id: $AWS_HOSTED_ZONE_ID\n domainName: $ROOT_DOMAIN\n description: \"my managed zone\"\n dnsProviderSecretRef:\n name: aws-credentials\nEOF\n
Check it's ready
kubectl get managedzones -n my-gateways\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"Create a gateway using your ROOT_DOMAIN as part of a listener hostname:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$ROOT_DOMAIN\"\n port: 80\n protocol: HTTP\nEOF\n
Check gateway status:
kubectl get gateway prod-web -n my-gateways\n
Response: NAME CLASS ADDRESS PROGRAMMED AGE\nprod-web istio 172.18.200.1 True 25s\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#enable-dns-on-the-gateway","title":"Enable DNS on the gateway","text":"Create a Kuadrant DNSPolicy
to configure DNS:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\nEOF\n
Check policy status:
kubectl get dnspolicy -o wide -n my-gateways\n
Response: NAME STATUS TARGETREFKIND TARGETREFNAME AGE\nprod-web Accepted Gateway prod-web 26s\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#deploy-a-sample-api-to-test-dns","title":"Deploy a sample API to test DNS","text":"Deploy the sample API:
kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n
Route traffic to the API from our gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: prod-web\n namespace: my-gateways\n hostnames:\n - \"*.$ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
Verify a DNSRecord resource is created:
kubectl get dnsrecords -n my-gateways\nNAME READY\nprod-web-api True\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#verify-dns-works-by-sending-requests","title":"Verify DNS works by sending requests","text":"Verify DNS using dig:
dig foo.$ROOT_DOMAIN +short\n
Response: 172.18.200.1\n
Verify DNS using curl:
curl http://api.$ROOT_DOMAIN\n
Response: {\n \"method\": \"GET\",\n \"path\": \"/\",\n \"query_string\": null,\n \"body\": \"\",\n \"headers\": {\n \"HTTP_HOST\": \"api.$ROOT_DOMAIN\",\n \"HTTP_USER_AGENT\": \"curl/7.85.0\",\n \"HTTP_ACCEPT\": \"*/*\",\n \"HTTP_X_FORWARDED_FOR\": \"10.244.0.1\",\n \"HTTP_X_FORWARDED_PROTO\": \"http\",\n \"HTTP_X_ENVOY_INTERNAL\": \"true\",\n \"HTTP_X_REQUEST_ID\": \"9353dd3d-0fe5-4404-86f4-a9732a9c119c\",\n \"HTTP_X_ENVOY_DECORATOR_OPERATION\": \"toystore.my-gateways.svc.cluster.local:80/*\",\n \"HTTP_X_ENVOY_PEER_METADATA\": \"ChQKDkFQUF9DT05UQUlORVJTEgIaAAoaCgpDTFVTVEVSX0lEEgwaCkt1YmVybmV0ZXMKHQoMSU5TVEFOQ0VfSVBTEg0aCzEwLjI0NC4wLjIyChkKDUlTVElPX1ZFUlNJT04SCBoGMS4xNy4yCtcBCgZMQUJFTFMSzAEqyQEKIwoVaXN0aW8uaW8vZ2F0ZXdheS1uYW1lEgoaCHByb2Qtd2ViChkKDGlzdGlvLmlvL3JldhIJGgdkZWZhdWx0CjMKH3NlcnZpY2UuaXN0aW8uaW8vY2Fub25pY2FsLW5hbWUSEBoOcHJvZC13ZWItaXN0aW8KLwojc2VydmljZS5pc3Rpby5pby9jYW5vbmljYWwtcmV2aXNpb24SCBoGbGF0ZXN0CiEKF3NpZGVjYXIuaXN0aW8uaW8vaW5qZWN0EgYaBHRydWUKGgoHTUVTSF9JRBIPGg1jbHVzdGVyLmxvY2FsCigKBE5BTUUSIBoecHJvZC13ZWItaXN0aW8tYzU0NWQ4ZjY4LTdjcjg2ChoKCU5BTUVTUEFDRRINGgtteS1nYXRld2F5cwpWCgVPV05FUhJNGktrdWJlcm5ldGVzOi8vYXBpcy9hcHBzL3YxL25hbWVzcGFjZXMvbXktZ2F0ZXdheXMvZGVwbG95bWVudHMvcHJvZC13ZWItaXN0aW8KFwoRUExBVEZPUk1fTUVUQURBVEESAioACiEKDVdPUktMT0FEX05BTUUSEBoOcHJvZC13ZWItaXN0aW8=\",\n \"HTTP_X_ENVOY_PEER_METADATA_ID\": \"router~10.244.0.22~prod-web-istio-c545d8f68-7cr86.my-gateways~my-gateways.svc.cluster.local\",\n \"HTTP_X_ENVOY_ATTEMPT_COUNT\": \"1\",\n \"HTTP_X_B3_TRACEID\": \"d65f580db9c6a50c471cdb534771c61a\",\n \"HTTP_X_B3_SPANID\": \"471cdb534771c61a\",\n \"HTTP_X_B3_SAMPLED\": \"0\",\n \"HTTP_VERSION\": \"HTTP/1.1\"\n },\n \"uuid\": \"0ecb9f84-db30-4289-a3b8-e22d4021122f\"\n}\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/","title":"Gateway Rate Limiting for Cluster Operators","text":"This user guide walks you through an example of how to configure rate limiting for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#run-the-steps-1-5","title":"Run the steps \u2460 \u2192 \u2464","text":""},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
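Optionally, you can block until the instance reports ready before continuing; a small sketch, assuming the Kuadrant custom resource exposes a Ready condition:
kubectl wait kuadrant/kuadrant -n kuadrant-system --for=condition=Ready --timeout=300s\n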
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#2-create-the-ingress-gateways","title":"\u2461 Create the ingress gateways","text":"kubectl -n istio-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: external\n annotations:\n kuadrant.io/namespace: kuadrant-system\n networking.istio.io/service-type: ClusterIP\nspec:\n gatewayClassName: istio\n listeners:\n\n - name: external\n port: 80\n protocol: HTTP\n hostname: '*.io'\n allowedRoutes:\n namespaces:\n from: All\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: internal\n annotations:\n kuadrant.io/namespace: kuadrant-system\n networking.istio.io/service-type: ClusterIP\nspec:\n gatewayClassName: istio\n listeners:\n - name: local\n port: 80\n protocol: HTTP\n hostname: '*.local'\n allowedRoutes:\n namespaces:\n from: All\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#3-enforce-rate-limiting-on-requests-incoming-through-the-external-gateway","title":"\u2462 Enforce rate limiting on requests incoming through the external
gateway","text":" \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Gateway) \u2502 \u2502 (Gateway) \u2502\n \u2502 external \u2502 \u2502 internal \u2502\n \u2502 \u2502 \u2502 \u2502\n \u2502 *.io \u2502 \u2502 *.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (RateLimitPolicy) \u2502\n\u2502 gw-rlp \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -n istio-system -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: gw-rlp\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: external\n limits:\n \"global\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
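Rather than guessing at the delay, you can wait on the policy's Accepted condition, following the same pattern the other guides in this documentation use (acceptance precedes full enforcement, so a short additional delay is still possible):
kubectl wait ratelimitpolicy gw-rlp -n istio-system --for=condition=accepted --timeout=300s\n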
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#4-deploy-a-sample-api-to-test-rate-limiting-enforced-at-the-level-of-the-gateway","title":"\u2463 Deploy a sample API to test rate limiting enforced at the level of the gateway","text":" \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 (Gateway) \u2502 \u2502 (Gateway) \u2502\n\u2502 (RateLimitPolicy) \u2502 \u2502 external \u2502 \u2502 internal \u2502\n\u2502 gw-rlp \u251c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 *.io \u2502 \u2502 *.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502 \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (HTTPRoute) \u2502\n \u2502 toystore \u2502\n \u2502 \u2502\n \u2502 *.toystore.io \u2502\n \u2502 *.toystore.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Service) \u2502\n \u2502 toystore \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Deploy the sample API:
kubectl apply -f examples/toystore/toystore.yaml\n
Route traffic to the API from both gateways:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: external\n namespace: istio-system\n - name: internal\n namespace: istio-system\n hostnames:\n - \"*.toystore.io\"\n - \"*.toystore.local\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#5-verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"\u2464 Verify the rate limiting works by sending requests in a loop","text":"Expose the gateways, respectively at the port numbers 9081
and 9082
of the local host:
kubectl port-forward -n istio-system service/external-istio 9081:80 2>&1 >/dev/null &\nkubectl port-forward -n istio-system service/internal-istio 9082:80 2>&1 >/dev/null &\n
Up to 5 successful (200 OK
) requests every 10 seconds through the external
ingress gateway (*.io
), then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK
) requests through the internal
ingress gateway (*.local
):
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9082 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/","title":"Gateway TLS for Cluster Operators","text":"This user guide walks you through an example of how to configure TLS for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#requisites","title":"Requisites","text":" - Docker
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#setup","title":"Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API, CertManager and Kuadrant itself.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Create a namespace:
kubectl create namespace my-gateways\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"Create a gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.toystore.local\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: toystore-local-tls\n kind: Secret\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#enable-tls-on-the-gateway","title":"Enable TLS on the gateway","text":"The TLSPolicy requires a reference to an existing CertManager Issuer.
Create a CertManager Issuer:
kubectl apply -n my-gateways -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: selfsigned-issuer\nspec:\n selfSigned: {}\nEOF\n
Note: We are using a self-signed issuer here, but any supported CertManager issuer or cluster issuer can be used.
kubectl get issuer selfsigned-issuer -n my-gateways\n
Response: NAME READY AGE\nselfsigned-issuer True 18s\n
Create a Kuadrant TLSPolicy
to configure TLS:
kubectl apply -n my-gateways -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: prod-web\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: Issuer\n name: selfsigned-issuer\nEOF\n
Check policy status:
kubectl get tlspolicy -o wide -n my-gateways\n
Response: NAME STATUS TARGETREFKIND TARGETREFNAME AGE\nprod-web Accepted Gateway prod-web 13s\n
Check a Certificate resource was created:
kubectl get certificates -n my-gateways\n
Response: NAME READY SECRET AGE\ntoystore-local-tls True toystore-local-tls 7m30s\n
Check a TLS Secret resource was created:
kubectl get secrets -n my-gateways --field-selector=\"type=kubernetes.io/tls\"\n
Response: NAME TYPE DATA AGE\ntoystore-local-tls kubernetes.io/tls 3 7m42s\n
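If you want to look inside the issued certificate itself, you can decode it from the secret; a sketch using openssl:
kubectl get secret toystore-local-tls -n my-gateways -o=jsonpath='{.data.tls\\.crt}' | base64 -d | openssl x509 -noout -subject -dates\n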
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#deploy-a-sample-api-to-test-tls","title":"Deploy a sample API to test TLS","text":"Deploy the sample API:
kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n
Route traffic to the API from our gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: prod-web\n namespace: my-gateways\n hostnames:\n - \"*.toystore.local\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#verify-tls-works-by-sending-requests","title":"Verify TLS works by sending requests","text":"Get the gateway address@
GWADDRESS=`kubectl get gateway/prod-web -n my-gateways -o=jsonpath='{.status.addresses[?(@.type==\"IPAddress\")].value}'`\necho $GWADDRESS\n
Response: 172.18.200.1\n
Verify we can access the service via TLS:
curl -vkI https://api.toystore.local --resolve \"api.toystore.local:443:$GWADDRESS\"\n
Response: * Added api.toystore.local:443:172.18.200.1 to DNS cache\n* Hostname api.toystore.local was found in DNS cache\n* Trying 172.18.200.1:443...\n* Connected to api.toystore.local (172.18.200.1) port 443 (#0)\n* ALPN: offers h2\n* ALPN: offers http/1.1\n* TLSv1.0 (OUT), TLS header, Certificate Status (22):\n* TLSv1.3 (OUT), TLS handshake, Client hello (1):\n* TLSv1.2 (IN), TLS header, Certificate Status (22):\n* TLSv1.3 (IN), TLS handshake, Server hello (2):\n* TLSv1.2 (IN), TLS header, Finished (20):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):\n* TLSv1.3 (IN), TLS handshake, Certificate (11):\n* TLSv1.3 (IN), TLS handshake, CERT verify (15):\n* TLSv1.3 (IN), TLS handshake, Finished (20):\n* TLSv1.2 (OUT), TLS header, Finished (20):\n* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.3 (OUT), TLS handshake, Finished (20):\n* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384\n* ALPN: server accepted h2\n* Server certificate:\n* subject: [NONE]\n* start date: Feb 15 11:46:50 2024 GMT\n* expire date: May 15 11:46:50 2024 GMT\n* Using HTTP2, server supports multiplexing\n* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* h2h3 [:method: HEAD]\n* h2h3 [:path: /]\n* h2h3 [:scheme: https]\n* h2h3 [:authority: api.toystore.local]\n* h2h3 [user-agent: curl/7.85.0]\n* h2h3 [accept: */*]\n* Using Stream ID: 1 (easy handle 0x5623e4fe5bf0)\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n> HEAD / HTTP/2\n> Host: api.toystore.local\n> user-agent: curl/7.85.0\n> accept: */*\n> \n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* old SSL session ID is stale, removing\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* Connection state changed (MAX_CONCURRENT_STREAMS == 2147483647)!\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n< HTTP/2 200 \nHTTP/2 200 \n< content-type: application/json\ncontent-type: application/json\n< server: istio-envoy\nserver: istio-envoy\n< date: Thu, 15 Feb 2024 12:13:27 GMT\ndate: Thu, 15 Feb 2024 12:13:27 GMT\n< content-length: 1658\ncontent-length: 1658\n< x-envoy-upstream-service-time: 1\nx-envoy-upstream-service-time: 1\n\n< \n\n* Connection #0 to host api.toystore.local left intact\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/","title":"Secure, protect, and connect APIs with Kuadrant on OpenShift","text":""},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#overview","title":"Overview","text":"This guide walks you through using Kuadrant on OpenShift to secure, protect, and connect an API exposed by a Gateway that is based on Kubernetes Gateway API. You can use this walkthrough for a Gateway deployed on a single OpenShift cluster or a Gateway distributed across multiple OpenShift clusters with a shared listener hostname. This guide shows how the platform engineer and application developer user roles can each use Kuadrant to achieve their goals.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#what-kuadrant-can-do-for-you-in-a-multicluster-environment","title":"What Kuadrant can do for you in a multicluster environment","text":"You can leverage Kuadrant's capabilities in single or multiple clusters. The following features are designed to work across multiple clusters as well as in a single-cluster environment.
- Multicluster ingress: Kuadrant provides multicluster ingress connectivity using DNS to bring traffic to your Gateways by using a strategy defined in a
DNSPolicy
. - Global rate limiting: Kuadrant can enable global rate limiting use cases when configured to use a shared Redis store for counters based on limits defined by a
RateLimitPolicy
. - Global auth: You can configure a Kuadrant
AuthPolicy
to leverage external auth providers to ensure that different clusters exposing the same API authenticate and authorize in the same way. - Integration with federated metrics stores: Kuadrant has example dashboards and metrics for visualizing your Gateways and observing traffic hitting those Gateways across multiple clusters.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#user-roles","title":"User roles","text":" -
Platform engineer: This guide walks you through deploying a Gateway that provides secure communication and is protected and ready for use by application development teams to deploy an API. It then walks through using this Gateway in clusters in different geographic regions, leveraging Kuadrant to bring specific traffic to your geo-located Gateways to reduce latency and distribute load, while still being protected and secured with global rate limiting and auth.
-
Application developer: This guide walks through how you can use the Kuadrant OpenAPI Specification (OAS) extensions and kuadrantctl
CLI to generate an HTTPRoute
for your API and to add specific auth and rate limiting requirements.
As an optional extra, this guide highlights how both user roles can observe and monitor these Gateways when the OpenShift user workload monitoring and observability stack is deployed.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#deployment-management-tooling","title":"Deployment management tooling","text":"While this document uses kubectl
commands for simplicity, working with multiple clusters is complex, and it is best to use a tool such as Argo CD to manage the deployment of resources to multiple clusters.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites","title":"Prerequisites","text":"This guide expects that you have successfully installed Kuadrant on at least one OpenShift cluster:
- You have completed the steps in Install Kuadrant on an OpenShift cluster for one or more clusters.
- For multicluster scenarios, you have installed Kuadrant on at least two different OpenShift clusters, and have a shared accessible Redis store.
- You have the
kubectl
command line installed. - Optional: User workload monitoring is configured to remote write to a central storage system such as Thanos, as described in Install Kuadrant on an OpenShift cluster.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#platform-engineer-workflow","title":"Platform engineer workflow","text":"NOTE: You must perform the following steps in each cluster individually, unless specifically excluded.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-1-set-your-environment-variables","title":"Step 1 - Set your environment variables","text":"Set the following environment variables used for convenience in this guide:
export zid=change-this-to-your-zone-id\nexport rootDomain=example.com\nexport gatewayNS=api-gateway\nexport gatewayName=external\nexport devNS=toystore\nexport AWS_ACCESS_KEY_ID=xxxx\nexport AWS_SECRET_ACCESS_KEY=xxxx\nexport AWS_REGION=us-east-1\nexport clusterIssuerName=lets-encrypt\nexport EMAIL=foo@example.com\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-2-set-up-a-managed-dns-zone","title":"Step 2 - Set up a managed DNS zone","text":"The managed DNS zone declares a zone and credentials to access the zone that Kuadrant can use to set up DNS configuration.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#create-the-managedzone-resource","title":"Create the ManagedZone resource","text":"Apply the following ManagedZone
resource and AWS credentials to each cluster; if you are adding an additional cluster later, apply them to the new cluster as well. First, create the namespace:
kubectl create ns ${gatewayNS}\n
Create the zone credentials as follows:
kubectl -n ${gatewayNS} create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
Then create a ManagedZone
as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: managedzone\n namespace: ${gatewayNS}\nspec:\n id: ${zid}\n domainName: ${rootDomain}\n description: \"Kuadrant managed zone\"\n dnsProviderSecretRef:\n name: aws-credentials\nEOF\n
Wait for the ManagedZone
to be ready in each cluster as follows:
kubectl wait managedzone/managedzone --for=condition=ready=true -n ${gatewayNS}\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-3-add-a-tls-issuer","title":"Step 3 - Add a TLS issuer","text":"To secure communication to the Gateways, you will define a TLS issuer for TLS certificates. This example uses Let's Encrypt, but you can use any issuer supported by cert-manager
.
The following example uses Let's Encrypt staging, which you must also apply to all clusters:
kubectl apply -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: ${clusterIssuerName}\nspec:\n acme:\n email: ${EMAIL} \n privateKeySecretRef:\n name: le-secret\n server: https://acme-staging-v02.api.letsencrypt.org/directory\n solvers:\n\n - dns01:\n route53:\n hostedZoneID: ${zid}\n region: ${AWS_REGION}\n accessKeyIDSecretRef:\n key: AWS_ACCESS_KEY_ID\n name: aws-credentials\n secretAccessKeySecretRef:\n key: AWS_SECRET_ACCESS_KEY\n name: aws-credentials\nEOF\n
Then wait for the ClusterIssuer
to become ready as follows:
kubectl wait clusterissuer/${clusterIssuerName} --for=condition=ready=true\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-4-set-up-a-gateway","title":"Step 4 - Set up a Gateway","text":"For Kuadrant to balance traffic using DNS across two or more clusters, you must define a Gateway with a shared host. You will define this by using an HTTPS listener with a wildcard hostname based on the root domain. As mentioned earlier, you must apply these resources to all clusters.
NOTE: For now, the Gateway is set to accept an HTTPRoute
from the same namespace only. This allows you to restrict who can use the Gateway until it is ready for general use.
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: ${gatewayName}\n namespace: ${gatewayNS}\n labels:\n kuadrant.io/gateway: \"true\"\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: Same\n hostname: \"*.${rootDomain}\"\n name: api\n port: 443\n protocol: HTTPS\n tls:\n certificateRefs:\n - group: \"\"\n kind: Secret\n name: api-${gatewayName}-tls\n mode: Terminate\nEOF\n
Check the status of your Gateway as follows:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\nkubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Programmed\")].message}'\n
Your Gateway should be accepted and programmed (valid and assigned an external address). However, if you check your listener status as follows, you will see that it is not yet programmed or ready to accept traffic due to bad TLS configuration:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
Kuadrant can help with this by using a TLSPolicy.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-5-secure-and-protect-the-gateway-with-auth-tls-rate-limit-and-dns-policies","title":"Step 5 - Secure and protect the Gateway with auth, TLS, rate limit, and DNS policies","text":"While your Gateway is now deployed, it has no exposed endpoints and your listener is not programmed. Next, you can set up a TLSPolicy
that leverages your CertificateIssuer to set up your listener certificates.
You will also define an AuthPolicy
that will set up a default 403
response for any unprotected endpoints, as well as a RateLimitPolicy
that will set up a default artificially low global limit to further protect any endpoints exposed by this Gateway.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-auth-policy","title":"Set the Auth policy","text":"Set a default, deny-all AuthPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: ${gatewayName}-auth\n namespace: ${gatewayNS}\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: ${gatewayName}\n defaults:\n rules:\n authorization:\n \"deny\":\n opa:\n rego: \"allow = false\"\nEOF\n
Check that your auth policy was accepted by the controller as follows:
kubectl get authpolicy ${gatewayName}-auth -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-tls-policy","title":"Set the TLS policy","text":"Set the TLSPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: ${gatewayName}-tls\n namespace: ${gatewayNS}\nspec:\n targetRef:\n name: ${gatewayName}\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: ${clusterIssuerName}\nEOF\n
Check that your TLS policy was accepted by the controller as follows:
kubectl get tlspolicy ${gatewayName}-tls -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-rate-limit-policy","title":"Set the rate limit policy","text":"Set the default RateLimitPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: ${gatewayName}-rlp\n namespace: ${gatewayNS}\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: ${gatewayName}\n defaults:\n limits:\n \"low-limit\":\n rates:\n\n - limit: 2\n duration: 10\n unit: second\nEOF\n
To check your rate limits have been accepted, enter the following command:
kubectl get ratelimitpolicy ${gatewayName}-rlp -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-dns-policy","title":"Set the DNS policy","text":"Set the DNSPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: ${gatewayName}-dnspolicy\n namespace: ${gatewayNS}\nspec:\n routingStrategy: loadbalanced\n loadBalancing:\n geo: \n defaultGeo: US \n weighted:\n defaultWeight: 120 \n targetRef:\n name: ${gatewayName}\n group: gateway.networking.k8s.io\n kind: Gateway\nEOF\n
NOTE: The DNSPolicy
will leverage the ManagedZone
that you defined earlier based on the listener hosts defined in the Gateway.
Check that your DNSPolicy
has been accepted as follows:
kubectl get dnspolicy ${gatewayName}-dnspolicy -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#create-an-http-route","title":"Create an HTTP route","text":"Create an HTTPRoute
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: test\n namespace: ${gatewayNS}\nspec:\n parentRefs:\n\n - name: ${gatewayName}\n namespace: ${gatewayNS}\n hostnames:\n - \"test.${rootDomain}\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
Check your Gateway policies are enforced as follows:
kubectl get dnspolicy ${gatewayName}-dnspolicy -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\nkubectl get authpolicy ${gatewayName}-auth -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\nkubectl get ratelimitpolicy ${gatewayName}-rlp -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\n
Check your listener is ready as follows:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-6-test-connectivity-and-deny-all-auth","title":"Step 6 - Test connectivity and deny all auth","text":"You can use curl
to hit your endpoint. You should see a 403
. Because this example uses Let's Encrypt staging, you can pass the -k
flag:
curl -k -w \"%{http_code}\" https://$(kubectl get httproute test -n ${gatewayNS} -o=jsonpath='{.spec.hostnames[0]}')\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-7-opening-up-the-gateway-for-other-namespaces","title":"Step 7 - Opening up the Gateway for other namespaces","text":"Because you have configured the Gateway, secured it with Kuadrant policies, and tested it, you can now open it up for use by other teams in other namespaces:
kubectl patch gateway ${gatewayName} -n ${gatewayNS} --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/listeners/0/allowedRoutes/namespaces/from\", \"value\":\"All\"}]'\n
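You can confirm the patch took effect by reading the field back; it should now print All:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.spec.listeners[0].allowedRoutes.namespaces.from}'\n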
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-8-extending-this-gateway-to-multiple-clusters-and-configuring-geo-based-routing","title":"Step 8 - Extending this Gateway to multiple clusters and configuring geo-based routing","text":"To distribute this Gateway across multiple clusters, repeat this setup process for each cluster. By default, this will implement a round-robin DNS strategy to distribute traffic evenly across the different clusters. Setting up your Gateways to serve clients based on their geographic location is straightforward with your current configuration.
Assuming that you have deployed Gateway instances across multiple clusters as per this guide, the next step involves updating the DNS controller with the geographic regions of the visible Gateways.
For instance, if you have one cluster in North America and another in the EU, you can direct traffic to these Gateways based on their location by applying the appropriate labels:
For your North American cluster, enter the following command:
kubectl label --overwrite gateway ${gatewayName} kuadrant.io/lb-attribute-geo-code=US -n ${gatewayNS}\n
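For your EU cluster, the corresponding command is the same label with the EU geo code (a sketch; use whichever geo code you have configured for your European Gateways):
kubectl label --overwrite gateway ${gatewayName} kuadrant.io/lb-attribute-geo-code=EU -n ${gatewayNS}\n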
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#application-developer-workflow","title":"Application developer workflow","text":"This section of the walkthrough focuses on using an OpenAPI Specification (OAS) to define an API. You will use Kuadrant OAS extensions to specify the routing, authentication, and rate limiting requirements. Next, you will use the kuadrantctl
tool to generate an AuthPolicy
, an HTTPRoute
, and a RateLimitPolicy
, which you will then apply to your cluster to enforce the settings defined in your OAS.
NOTE: While this section uses the kuadrantctl
tool, this is not essential. You can also create and apply an AuthPolicy
, RateLimitPolicy
, and HTTPRoute
by using the oc
or kubectl
commands.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites_1","title":"Prerequisites","text":" - You have installed
kuadrantctl
. You can find a compatible binary and download it from the kuadrantctl releases page. - You have the ability to distribute resources generated by
kuadrantctl
to multiple clusters, as though you are a platform engineer.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-1-deploy-the-toystore-app","title":"Step 1 - Deploy the toystore app","text":"To begin, deploy a new version of the toystore
app to a developer namespace as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/Kuadrant-operator/main/examples/toystore/toystore.yaml -n ${devNS}\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-2-set-up-httproute-and-backend","title":"Step 2 - Set up HTTPRoute and backend","text":"Copy at least one of the following example OAS to a local location:
-
Sample OAS for rate limiting with API key
-
Sample OAS for rate limiting with OIDC
Set up some new environment variables as follows:
export oasPath=examples/oas-apikey.yaml\n# Ensure you still have these environment variables setup from the start of this guide:\nexport rootDomain=example.com\nexport gatewayNS=api-gateway\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-3-use-oas-to-define-your-httproute-rules","title":"Step 3 - Use OAS to define your HTTPRoute rules","text":"You can generate Kuadrant and Gateway API resources directly from OAS documents by using an x-kuadrant
extension.
NOTE: For a more in-depth look at the OAS extension, see the kuadrantctl documentation.
You will use kuadrantctl
to generate your HTTPRoute
.
NOTE: The sample OAS has some placeholders for namespaces and domains. You will inject valid values into these placeholders based on your previous environment variables.
Generate the resource from your OAS as follows (envsubst
will replace the placeholders):
cat $oasPath | envsubst | kuadrantctl generate gatewayapi httproute --oas - | kubectl apply -f -\n
kubectl get httproute toystore -n ${devNS} -o=yaml\n
You should see that this route is affected by the AuthPolicy
and RateLimitPolicy
defined as defaults on the Gateway in the Gateway namespace.
- lastTransitionTime: \"2024-04-26T13:37:43Z\"\n message: Object affected by AuthPolicy demo/external\n observedGeneration: 2\n reason: Accepted\n status: \"True\"\n type: kuadrant.io/AuthPolicyAffected\n- lastTransitionTime: \"2024-04-26T14:07:28Z\"\n message: Object affected by RateLimitPolicy demo/external\n observedGeneration: 1\n reason: Accepted\n status: \"True\"\n type: kuadrant.io/RateLimitPolicyAffected \n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-4-test-connectivity-and-deny-all-auth","title":"Step 4 - Test connectivity and deny-all auth","text":"You can use curl
to hit an endpoint in the toystore app. Because you are using Let's Encrypt staging in this example, you can pass the -k
flag as follows:
curl -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You are getting a 403
because of the existing default, deny-all AuthPolicy
applied at the Gateway. You can override this for your HTTPRoute
.
Choose one of the following options:
- API key auth flow
- OpenID Connect auth flow
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-5-set-up-api-key-auth-flow","title":"Step 5 - Set up API key auth flow","text":"Set up an example API key in each cluster as follows:
kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: toystore-api-key\n namespace: ${devNS}\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: api_key\nstringData:\n api_key: secret\ntype: Opaque\nEOF\n
Next, generate an AuthPolicy
that uses secrets in your cluster as API keys as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas -\n
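If you prefer to review the generated policy before applying it, you can tee it to a file first (the path here is only illustrative):
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | tee /tmp/authpolicy.yaml\n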
From this, you can see an AuthPolicy
generated based on your OAS that will look for API keys in secrets labeled api_key
and look for that key in the header api_key
. You can now apply this to the cluster as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | kubectl apply -f -\n
You should get a 200
from the following GET
because it has no auth requirement:
curl -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should get a 401
for the following POST
request because this endpoint does have an auth requirement and no API key is provided:
curl -XPOST -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
Finally, if you add your API key header, with a valid key as follows, you should get a 200
response:
curl -XPOST -H 'api_key: secret' -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#optional-step-6-set-up-openid-connect-auth-flow-skip-if-using-api-key-only","title":"Optional: Step 6 - Set up OpenID Connect auth flow (skip if using API key only)","text":"This section of the walkthrough uses the kuadrantctl
tool to create an AuthPolicy
that integrates with an OpenID provider and a RateLimitPolicy
that leverages JWT values for per-user rate limiting. It is important to note that OpenID requires an external provider. Therefore, you should adapt the following example to suit your specific needs and provider.
The platform engineer workflow established default policies for authentication and rate limiting at your Gateway. The new developer-defined policies, which you will create, are intended to target your HTTPRoute and will supersede the existing policies for requests to your API endpoints, similar to your previous API key example.
The example OAS uses Kuadrant-based extensions. These extensions enable you to define routing and service protection requirements. For more details, see OpenAPI Kuadrant extensions.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites_2","title":"Prerequisites","text":" - You have installed and configured an OpenID Connect provider, such as https://www.keycloak.org/.
- You have a realm, client, and users set up. This example assumes a realm in a Keycloak instance called
toystore
. - Copy the OAS from sample OAS for rate-limiting and OIDC to a local location.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-up-an-openid-authpolicy","title":"Set up an OpenID AuthPolicy","text":"Set the following environment variables:
export openIDHost=some.keycloak.com\nexport oasPath=examples/oas-oidc.yaml\n
NOTE: The sample OAS has some placeholders for namespaces and domains. You will inject valid values into these placeholders based on your previous environment variables.
You can use your OAS and kuadrantctl
to generate an AuthPolicy
to replace the default on the Gateway as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas -\n
If you are happy with the generated resource, you can apply it to the cluster as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | kubectl apply -f -\n
You should see in the status of the AuthPolicy
that it has been accepted and enforced:
kubectl get authpolicy -n ${devNS} toystore -o=jsonpath='{.status.conditions}'\n
On your HTTPRoute
, you should also see it now affected by this AuthPolicy
in the toystore namespace:
kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/AuthPolicyAffected\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#test-your-openid-authpolicy","title":"Test your OpenID AuthPolicy","text":"You can test your AuthPolicy
as follows:
export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=toystore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${openIDHost}/auth/realms/toystore/protocol/openid-connect/token\" | jq -r '.access_token')\n
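Before testing, it is worth confirming a token actually came back, since jq prints the string null when the credentials are rejected; a small sketch:
test -n \"$ACCESS_TOKEN\" && [ \"$ACCESS_TOKEN\" != \"null\" ] && echo \"token acquired\" || echo \"token request failed\"\n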
curl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should see a 401
response code. Make a request with a valid bearer token as follows:
curl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should see a 200
response code.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-7-set-up-rate-limiting","title":"Step 7 - Set up rate limiting","text":"Lastly, you can generate your RateLimitPolicy
to add your rate limits, based on your OAS file. Rate limiting is simplified for this walkthrough and is based on either the bearer token or the API key value. There are more advanced examples in the How-to guides on the Kuadrant documentation site, for example: Authenticated rate limiting with JWTs and Kubernetes RBAC.
You can continue to use this sample OAS document, which includes both authentication and a rate limit:
export oasPath=examples/oas-oidc.yaml\n
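The generate-and-apply step mirrors the AuthPolicy flow above; a sketch, assuming your kuadrantctl version provides the ratelimitpolicy subcommand:
cat $oasPath | envsubst | kuadrantctl generate kuadrant ratelimitpolicy --oas - | kubectl apply -f -\n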
Again, you should see the rate limit policy accepted and enforced:
kubectl get ratelimitpolicy -n ${devNS} toystore -o=jsonpath='{.status.conditions}'\n
On your HTTPRoute
, you should now see it is affected by the RateLimitPolicy
in the same namespace:
kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/RateLimitPolicyAffected\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#test-your-ratelimitpolicy","title":"Test your RateLimitPolicy","text":"You can now test your rate limiting as follows:
NOTE: You might need to wait a minute for the new rate limits to be applied. With the following requests, you should see a number of 429 responses.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#api-key-auth","title":"API Key auth","text":"for i in {1..3}\ndo\nprintf \"request $i \"\ncurl -XPOST -H 'api_key:secret' -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\nprintf \"\\n -- \\n\"\ndone \n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#openid-connect-auth","title":"OpenID Connect auth","text":"export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=toystore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${openIDHost}/auth/realms/toystore/protocol/openid-connect/token\" | jq -r '.access_token')\n
for i in {1..3}\ndo\ncurl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\ndone\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#conclusion","title":"Conclusion","text":"You have completed the secure, protect, and connect walkthrough. To learn more about Kuadrant, visit https://docs.kuadrant.io.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/","title":"Secure, protect, and connect services with Kuadrant on Kubernetes","text":""},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#prerequisites","title":"Prerequisites","text":" - You have completed the Single-cluster Quick Start or Multi-cluster Quick Start.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#overview","title":"Overview","text":"In this guide, we will cover the different policies from Kuadrant and how you can use them to secure, protect and connect an Istio-controlled gateway in a single cluster, and how you can set more refined protection on the HTTPRoutes exposed by that gateway.
Here are the steps we will go through:
1) Deploy a sample application
2) Define a new Gateway
3) Ensure TLS-based secure connectivity to the gateway with a TLSPolicy
4) Define a default RateLimitPolicy to set some infrastructure limits on your gateway
5) Define a default AuthPolicy to deny all access to the gateway
6) Define a DNSPolicy to bring traffic to the gateway
7) Override the Gateway's deny-all AuthPolicy with an endpoint-specific policy
8) Override the Gateway rate limits with an endpoint-specific policy
You will need to set the KUBECTL_CONTEXT
environment variable for the kubectl context of the cluster you are targeting. If you have followed the single cluster setup, it should be something like below. Adjust the name of the cluster accordingly if you have followed the multi cluster setup.
# Typical single cluster context\nexport KUBECTL_CONTEXT=kind-kuadrant-local\n\n# Example context for additional 'multi cluster' clusters\n# export KUBECTL_CONTEXT=kind-kuadrant-local-1\n
To help with this walkthrough, you should also set a KUADRANT_ZONE_ROOT_DOMAIN
environment variable to a domain you want to use. If you want to try DNSPolicy, this should also be a domain you have access to the DNS for in AWS Route53 or GCP. E.g.:
export KUADRANT_ZONE_ROOT_DOMAIN=my.domain.iown\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#deploy-the-example-app-we-will-serve-via-our-gateway","title":"\u2776 Deploy the example app we will serve via our gateway","text":"kubectl --context $KUBECTL_CONTEXT apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-a-new-istio-managed-gateway","title":"\u2777 Define a new Istio-managed gateway","text":"kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: api-gateway\n namespace: kuadrant-system\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$KUADRANT_ZONE_ROOT_DOMAIN\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: apps-hcpapps-tls\n kind: Secret\nEOF\n
If you take a look at the gateway status, you will see a TLS status error similar to the following:
message: invalid certificate reference /Secret/apps-hcpapps-tls. secret kuadrant-system/apps-hcpapps-tls not found\n
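To surface that message yourself, you can dump the listener conditions directly (a sketch; exactly which condition carries the error can vary by Gateway implementation):
kubectl --context $KUBECTL_CONTEXT get gateway api-gateway -n kuadrant-system -o=jsonpath='{.status.listeners[0].conditions}'\n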
This is because there is currently no TLS secret in place. Let's fix that by creating a TLSPolicy.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-tlspolicy","title":"\u2778 Define the TLSPolicy","text":"Note: For convenience, in the setup, we have created a self-signed CA as a cluster issuer in the Kubernetes cluster.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: api-gateway-tls\n namespace: kuadrant-system\nspec:\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: kuadrant-operator-glbc-ca\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait tlspolicy api-gateway-tls -n kuadrant-system --for=condition=accepted\n
Now, if you look at the status of the gateway, you will see the error is gone, and the status of the policy will report the listener as now secured with a TLS certificate and the gateway as affected by the TLS policy.
Our communication with our gateway is now secured via TLS. Note that any new listeners will also be handled by the TLSPolicy.
Let's define a HTTPRoute and test our policy. We will re-use this later on with some of the other policies as well.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1beta1\nkind: HTTPRoute\nmetadata:\n name: toystore\n labels:\n deployment: toystore\n service: toystore\nspec:\n parentRefs:\n\n - name: api-gateway\n namespace: kuadrant-system\n hostnames:\n - \"api.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/cars\"\n - method: GET\n path:\n type: PathPrefix\n value: \"/dolls\"\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
With this HTTPRoute in place, the service we deployed is exposed via the gateway. We should be able to access our endpoint via HTTPS:
export INGRESS_HOST=$(kubectl --context $KUBECTL_CONTEXT get gtw api-gateway -o jsonpath='{.status.addresses[0].value}' -n kuadrant-system)\n\ncurl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-infrastructure-rate-limiting","title":"\u2779 Define Infrastructure Rate Limiting","text":"We have a secure communication in place. However, there is nothing limiting users from overloading our infrastructure and service components that will sit behind this gateway. Let's add a rate limiting layer to protect our services and infrastructure.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: infra-ratelimit\n namespace: kuadrant-system\nspec:\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\n limits:\n \"global\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait ratelimitpolicy infra-ratelimit -n kuadrant-system --for=condition=accepted\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
The limit here is artificially low in order for us to show it working easily. Let's test it with our endpoint:
for i in {1..10}; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" && sleep 1; done\n
We should see 429 Too Many Requests
responses start returning after the 5th request.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-gateway-authpolicy","title":"\u277a Define the Gateway AuthPolicy","text":"Communication is secured and we have some protection for our infrastructure, but we do not trust any client to access our endpoints. By default, we want to allow only authenticated access. To protect our gateway, we will add a deny-all AuthPolicy. Later, we will override this with a more specific AuthPolicy for the API.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: deny-all\n namespace: kuadrant-system\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: api-gateway\n rules:\n authorization:\n deny-all:\n opa:\n rego: \"allow = false\"\n response:\n unauthorized:\n headers:\n \"content-type\":\n value: application/json\n body:\n value: |\n {\n \"error\": \"Forbidden\",\n \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n }\nEOF\n
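As with the other policies, you can wait for the AuthPolicy to be accepted before testing:
kubectl --context $KUBECTL_CONTEXT wait authpolicy deny-all -n kuadrant-system --for=condition=accepted\n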
Let's test it again. This time we expect a 403 Forbidden
.
curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-dnspolicy","title":"\u277b Define the DNSPolicy","text":"(Skip this step if you did not configure a DNS provider during the setup.)
Now, we have our gateway protected and communications secured. We are ready to configure DNS, so it is easy for clients to connect and access the APIs we intend to expose via this gateway. Note that during the setup of this walkthrough, we created a DNS Provider secret and a ManagedZone resource.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: simple-dnspolicy\n namespace: kuadrant-system\nspec:\n routingStrategy: simple\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait dnspolicy simple-dnspolicy -n kuadrant-system --for=condition=enforced\n
If you want to see the DNSRecord created by this policy, execute the following command:
kubectl --context $KUBECTL_CONTEXT get dnsrecord.kuadrant.io api-gateway-api -n kuadrant-system -o=yaml\n
So now we have a wildcard DNS record to bring traffic to our gateway.
Let's test it again. This time we expect a 403
because the deny-all policy is still in effect. Notice we no longer need to set the Host header directly.
Note: If you have followed through this guide on more than 1 cluster, the DNS record for the HTTPRoute hostname will have multiple IP addresses. This means that requests will be made in a round robin pattern across clusters as your DNS provider sends different responses to lookups. You may need to send multiple requests before one hits the cluster you are currently configuring.
curl -k \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" -i\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#override-the-gateways-deny-all-authpolicy","title":"\u277c Override the Gateway's deny-all AuthPolicy","text":"Next, we are going to allow authenticated access to our Toystore API. To do this, we will define an AuthPolicy that targets the HTTPRoute. Note that any new HTTPRoutes will still be affected by the gateway-level policy, but as we want users to now access this API, we need to override that policy. For simplicity, we will use API keys to authenticate the requests, though many other options are available.
Let's define an API Key for users bob and alice.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: bob-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: bob\nstringData:\n api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: alice-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: alice\nstringData:\n api_key: IAMALICE\ntype: Opaque\nEOF\n
Now, we will override the AuthPolicy to start accepting the API keys:
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels:\n app: toystore\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
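Before moving on to rate limits, you can verify the override with a single authenticated request, reusing the resolve flag from earlier; a 200 is expected:
curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" -i\n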
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#override-the-gateways-ratelimitpolicy","title":"\u277d Override the Gateway's RateLimitPolicy","text":"The gateway limits are a good set of limits for the general case, but as the developers of this API we know that we only want to allow a certain number of requests to specific users, and a general limit for all other users.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"general-user\":\n rates:\n\n - limit: 1\n duration: 3\n unit: second\n counters:\n - metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: neq\n value: bob\n \"bob-limit\":\n rates:\n - limit: 2\n duration: 3\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: bob\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
In this example, we have given bob twice as many requests as everyone else.
Let's test this new setup.
By sending requests as alice:
while :; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
By sending requests as bob:
while :; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Note: If you configured a DNS provider during the setup and defined the DNSPolicy as described in one of the previous chapters, you can omit the --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST}
flag.
Note: If you have followed through this guide on more than 1 cluster, the DNS record for the HTTPRoute hostname will have multiple IP addresses. This means that requests will be made in a round robin pattern across clusters as your DNS provider sends different responses to lookups.
while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/","title":"Simple Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.
In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.toystore.com
, where it exposes the endpoints GET /toys*
and POST /toys
, respectively, to mimic operations of reading and writing toy records.
We will rate limit the POST /toys
endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#run-the-steps-1-3","title":"Run the steps \u2460 \u2192 \u2462","text":""},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Create the deployment:
kubectl apply -f examples/toystore/toystore.yaml\n
Create an HTTPRoute to route traffic to the service via the Istio Ingress Gateway:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\n - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Verify the route works:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#3-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2462 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"create-toy\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n routeSelectors:\n - matches: # selects the 2nd HTTPRouteRule of the targeted route\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
Verify the rate limiting works by sending requests in a loop.
Up to 5 successful (200 OK
) requests every 10 seconds to POST /toys
, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -X POST | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK) requests to GET /toys:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/examples/alerts/","title":"Index","text":""},{"location":"kuadrant-operator/examples/alerts/#slo-multi-burn-rate-multi-window-alerts","title":"SLO Multi burn rate multi window alerts","text":"Kuadrant have created two example SLO alerts to help give ideas on the types of SLO alerts that could be used with the operator. We have created one alert for latency and one for availability, both are Multiwindow, Multi-Burn-Rate Alerts. The alerts show a scenario where a 28d rolling window is used and a uptime of 99.95% i.e only 0.05% error budget margin is desired. This in real world time would be downtime of around:
- Daily: 43s
- Weekly: 5m 2.4s
- Monthly: 21m 44s
- Quarterly: 1h 5m 12s
- Yearly: 4h 20m 49s
These values can be changed to suit different scenarios.
"},{"location":"kuadrant-operator/examples/alerts/#sloth","title":"Sloth","text":"Sloth is a tool to aid in the creation of multi burn rate and multi window SLO alerts and was used to create both the availability and latency alerts. It follows the common standard set out by Google's SRE book. Sloth generates alerts based on specific specs given. The specs for our example alerts can be found in the example/sloth folder.
"},{"location":"kuadrant-operator/examples/alerts/#metrics-used-for-the-alerts","title":"Metrics used for the alerts","text":""},{"location":"kuadrant-operator/examples/alerts/#availability","title":"Availability","text":"For the availability SLO alerts the Istio metric istio_requests_total
was used, as it is a counter-type metric, meaning its values can only increase, and it gives information on all requests handled by the Istio proxy.
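As an illustration only, a minimal Sloth spec for such an availability SLO might look like the following sketch (hypothetical service name and error/total queries; the actual specs used for the example alerts live in the examples/alerts/sloth folder):
version: prometheus/v1\nservice: toystore\nslos:\n  - name: availability\n    objective: 99.95\n    sli:\n      events:\n        error_query: sum(rate(istio_requests_total{response_code=~\"5..\"}[{{.window}}]))\n        total_query: sum(rate(istio_requests_total[{{.window}}]))\n    alerting:\n      name: ToystoreAvailability\n      page_alert:\n        labels:\n          severity: critical\n      ticket_alert:\n        labels:\n          severity: warning\n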
"},{"location":"kuadrant-operator/examples/alerts/#latency","title":"Latency","text":"For the availability SLO alerts the Istio metric istio_request_duration_milliseconds
was used as its a Histogram.
"},{"location":"kuadrant-operator/examples/alerts/#sloth-generation","title":"Sloth generation","text":"You can modify the examples Sloth specs we have and regenerate the prometheus rules using the Sloth CLI and the generate command. For more information please the Sloth website
sloth generate -i examples/alerts/sloth/latency.yaml --default-slo-period=28d\n
You can also use the make target to generate the rules: make sloth-generate\n
"},{"location":"kuadrant-operator/examples/alerts/#prometheus-unit-tests","title":"Prometheus unit tests","text":"There are also two matching unit tests to verify and test the alerts that Sloth has generated. These can be run using the make target:
make alerts-tests\n
"},{"location":"authorino/","title":"Authorino","text":"Kubernetes-native authorization service for tailor-made Zero Trust API security.
A lightweight Envoy external authorization server fully manageable via Kubernetes Custom Resources. JWT authentication, API key, mTLS, pattern-matching authz, OPA, K8s SA tokens, K8s RBAC, external metadata fetching, and more, with minimal to no coding at all, and no rebuilding of your applications.
Authorino is not about inventing anything new. It's about making the best things about auth out there easy and simple to use. Authorino is multi-tenant, it's cloud-native and it's open source.
"},{"location":"authorino/#getting-started","title":"Getting started","text":" - Deploy with the Authorino Operator
- Setup Envoy proxy and the external authorization filter
- Apply an Authorino
AuthConfig
custom resource - Obtain an authentication token and start sending requests
The full Getting started page of the docs provides details for the steps above, as well as information about requirements and next steps.
Or try out our Hello World example.
For general information about protecting your service using Authorino, check out the docs.
"},{"location":"authorino/#use-cases","title":"Use-cases","text":"The User guides section of the docs gathers several AuthN/AuthZ use-cases as well as the instructions to implement them using Authorino. A few examples are:
- Authentication with JWTs and OpenID Connect Discovery
- Authentication with API keys
- Authentication with Kubernetes SA tokens (TokenReview API)
- Authentication with X.509 certificates and mTLS
- Authorization with JSON pattern-matching rules (e.g. JWT claims, request attributes, etc)
- Authorization with Open Policy Agent (OPA) Rego policies
- Authorization using the Kubernetes RBAC (rules stated in K8s
Role
and RoleBinding
resources) - Authorization using auth metadata fetched from external sources
- OIDC authentication and RBAC with Keycloak JWTs
- Injecting auth data into the request (HTTP headers, Wristband tokens, rate-limit metadata, etc)
- Authorino for the Kubernetes control plane (aka Authorino as ValidatingWebhook service)
"},{"location":"authorino/#how-it-works","title":"How it works","text":"Authorino enables hybrid API security, with usually no code changes required to your application, tailor-made for your own combination of authentication standards and protocols and authorization policies of choice.
Authorino implements Envoy Proxy's external authorization gRPC protocol, and is a part of Red Hat Kuadrant architecture.
Under the hood, Authorino is based on Kubernetes Custom Resource Definitions and the Operator pattern.
Bootstrap and configuration:
- Deploy the service/API to be protected (\"Upstream\"), Authorino and Envoy
- Write and apply an Authorino
AuthConfig
Custom Resource associated to the public host of the service
Request-time:
- A user or service account (\"Consumer\") obtains an access token to consume resources of the Upstream service, and sends a request to the Envoy ingress endpoint
- The Envoy proxy establishes fast gRPC connection with Authorino carrying data of the HTTP request (context info), which causes Authorino to lookup for an
AuthConfig
Custom Resource to enforce (pre-cached) - Identity verification (authentication) phase - Authorino verifies the identity of the consumer, where at least one authentication method/identity provider must go through
- External metadata phase - Authorino fetches additional metadata for the authorization from external sources (optional)
- Policy enforcement (authorization) phase - Authorino takes as input a JSON composed out of context data, resolved identity object and fetched additional metadata from previous phases, and triggers the evaluation of user-defined authorization policies
- Response (metadata-out) phase \u2013 Authorino builds user-defined custom responses (dynamic JSON objects and/or Festival Wristband OIDC tokens), to be supplied back to the client and/or upstream service within added HTTP headers or as Envoy Dynamic Metadata (optional)
- Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints (optional)
- Authorino and Envoy settle the authorization protocol with either OK/NOK response
- If authorized, Envoy triggers other HTTP filters in the chain (if any), pre-injecting eventual dynamic metadata returned by Authorino, and ultimately redirects the request to the Upstream
- The Upstream serves the requested resource to the consumer
More The Architecture section of the docs covers details of protecting your APIs with Envoy and Authorino, including information about topology (centralized gateway, centralized authorization service or sidecars), deployment modes (cluster-wide reconciliation vs. namespaced instances), an specification of Authorino's AuthConfig
Custom Resource Definition (CRD) and more.
You will also find in that section information about what happens in request-time (aka Authorino's Auth Pipeline) and how to leverage the Authorization JSON for writing policies, dynamic responses and other features of Authorino.
"},{"location":"authorino/#list-of-features","title":"List of features","text":"Feature Stage Identity verification & authentication JOSE/JWT validation (OpenID Connect) Ready OAuth 2.0 Token Introspection (opaque tokens) Ready Kubernetes TokenReview (SA tokens) Ready OpenShift User-echo endpoint In analysis API key authentication Ready mTLS authentication Ready HMAC authentication Planned (#9) Plain (resolved beforehand and injected in the payload) Ready Anonymous access Ready Ad hoc external metadata fetching OpenID Connect User Info Ready UMA-protected resource attributes Ready HTTP GET/GET-by-POST Ready Policy enforcement/authorization JSON pattern matching (e.g. JWT claims, request attributes checking) Ready OPA/Rego policies (inline and pull from registry) Ready Kubernetes SubjectAccessReview (resource and non-resource attributes) Ready Authzed/SpiceDB Ready Keycloak Authorization Services (UMA-compliant Authorization API) In analysis Custom responses Festival Wristbands tokens (token normalization, Edge Authentication Architecture) Ready JSON injection (header injection, Envoy Dynamic Metadata) Ready Plain text value (header injection) Ready Custom response status code/messages (e.g. redirect) Ready Callbacks HTTP endpoints Ready Caching OpenID Connect and User-Managed Access configs Ready JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS) Ready Access tokens Ready External metadata Ready Precompiled Rego policies Ready Policy evaluation Ready Sharding (lookup performance, multitenancy) Ready For a detailed description of the features above, refer to the Features page.
"},{"location":"authorino/#faq","title":"FAQ","text":"Do I need to deploy Envoy? Authorino is built from the ground up to work well with Envoy. It is strongly recommended that you leverage Envoy along side Authorino. That said, it is possible to use Authorino without Envoy.
Authorino implements Envoy's external authorization gRPC protocol and therefore will accept any client request that complies.
Authorino also provides a second interface for raw HTTP authorization, suitable for using with Kubernetes ValidatingWebhook and other integrations (e.g. other proxies).
The only attribute of the authorization request that is strictly required is the host name. (See Host lookup for more information.) The other attributes, such as method, path, headers, etc, might as well be required, depending on each AuthConfig
. In the case of the gRPC CheckRequest
method, the host is supplied in Attributes.Request.Http.Host
and alternatively in Attributes.ContextExtensions[\"host\"]
. For raw HTTP authorization requests, the host must be supplied in Host
HTTP header.
Check out Kuadrant for easy-to-use Envoy and Authorino deployment & configuration for API management use-cases, using Kubernetes Custom Resources.
Is Authorino an Identity Provider (IdP)? No, Authorino is not an Identity Provider (IdP). Neither it is an auth server of any kind, such as an OAuth2 server, an OpenID Connect (OIDC) server, a Single Sign On (SSO) server.
Authorino is not an identity broker either. It can verify access tokens from multiple trusted sources of identity and protocols, but it will not negotiate authentication flows for non-authenticated access requests. Some tricks nonetheless can be done, for example, to redirect unauthenticated users to a login page.
For an excellent auth server that checks all the boxes above, check out Keycloak.
How does Authorino compare to Keycloak? Keycloak is a proper auth server and identity provider (IdP). It offers a huge set of features for managing identities, identity sources with multiple user federation options, and a platform for authentication and authorization services.
Keycloak exposes authenticators that implement protocols such as OpenID Connect. The is a one-time flow that establishes the delegation of power to a client, for a short period of time. To be consistent with Zero Trust security, you want a validator to verify the short-lived tokens in every request that tries to reach your protected service/resource. This step that will repeat everytime could save heavy looking up into big tables of tokens and leverage cached authorization policies for fast in-memory evaluation. This is where Authorino comes in.
Authorino verifies and validates Keycloak-issued ID tokens. OpenID Connect Discovery is used to request and cache JSON Web Key Sets (JWKS), used to verify the signature of the tokens without having to contact again with the Keycloak server, or looking in a table of credentials. Moreover, user long-lived credentials are safe, rather than spread in hops across the network.
You can also use Keycloak for storing auth-relevant resource metadata. These can be fetched by Authorino in request-time, to be combined into your authorization policies. See Keycloak Authorization Services and User-Managed Access (UMA) support, as well as Authorino UMA external metadata counter-part.
Why doesn't Authorino handle OAuth flows? It has to do with trust. OAuth grants are supposed to be negotiated directly between whoever owns the long-lived credentials in one hand (user, service accounts), and the trustworthy auth server that receives those credentials \u2013 ideally with minimum number of hops in the middle \u2013 and exchanges them for short-lived access tokens, on the other end.
There are use-cases for Authorino running in the edge (e.g. Edge Authentication Architecture and token normalization), but in most cases Authorino should be seen as a last-mile component that provides decoupled identity verification and authorization policy enforcement to protected services in request-time. In this sense, the OAuth grant is a pre-flight exchange that happens once and as direct and safe as possible, whereas auth enforcement is kept lightweight and efficient.
Where does Authorino store users and roles? Authorino does not store users, roles, role bindings, access control lists, or any raw authorization data. Authorino handles policies, where even these policies can be stored elsewhere (as opposed to stated inline inside of an Authorino AuthConfig
CR).
Authorino evaluates policies for stateless authorization requests. Any additional context is either resolved from the provided payload or static definitions inside the policies. That includes extracting user information from a JWT or client TLS certificate, requesting user metadata from opaque authentication tokens (e.g. API keys) to the trusted sources actually storing that content, obtaining synchronous HTTP metadata from services, etc.
In the case of authentication with API keys, as well as its derivative to model HTTP Basic Auth, user data are stored in Kubernetes Secret
s. The secret's keys, annotations and labels are usually the structures used to organize the data that later a policy evaluated in Authorino may require. Strictly, those are not Authorino data structures.
Can't I just use Envoy JWT Authentication and RBAC filters? Envoy's JWT Authentication works pretty much similar to Authorino's JOSE/JWT verification and validation for OpenID Connect. In both cases, the JSON Web Key Sets (JWKS) to verify the JWTs are auto-loaded and cached to be used in request-time. Moreover, you can configure for details such as where to extract the JWT from the HTTP request (header, param or cookie) and do some cool tricks regarding how dynamic metadata based on JWT claims can be injected to consecutive filters in the chain.
However, in terms of authorization, while Envoy's implementation essentially allows to check for the list of audiences (aud
JWT claim), Authorino opens up for a lot more options such as pattern-matching rules with operators and conditionals, built-in OPA and other methods of evaluating authorization policies.
Authorino also allows to combine JWT authentication with other types of authentication to support different sources of identity and groups of users such as API keys, Kubernetes tokens, OAuth opaque tokens , etc.
In summary, Envoy's JWT Authentication and Envoy RBAC filter are excellent solutions for simple use-cases where JWTs from one single issuer is the only authentication method you are planning to support and limited to no authorization rules suffice. On the other hand, if you need to integrate more identity sources, different types of authentication, authorization policies, etc, you might to consider Authorino.
Should I use Authorino if I already have Istio configured? Istio is a great solution for managing service meshes. It delivers an excellent platform with an interesting layer of abstraction on top of Envoy proxy's virtual omnipresence within the mesh.
There are lots of similarities, but also complementarity between Authorino and Istio and Istio Authorization in special.
Istio provides a simple way to enable features that are, in many cases, features of Envoy, such as authorization based on JWTs, authorization based on attributes of the request, and activation of external authorization services, without having to deal with complex Envoy config files. See Kuadrant for a similar approach, nonetheless leveraging features of Istio as well.
Authorino is an Envoy-compatible external authorization service. One can use Authorino with or without Istio.
In particular, Istio Authorization Policies can be seen, in terms of functionality and expressiveness, as a subset of one type of authorization policies supported by Authorino, the pattern-matching authorization policies. While Istio, however, is heavily focused on specific use cases of API Management, offering a relatively limited list of supported attribute conditions, Authorino is more generic, allowing to express authorization rules for a wider spectrum of use cases \u2013 ACLs, RBAC, ABAC, etc, pretty much counting on any attribute of the Envoy payload, identity object and external metadata available.
Authorino also provides built-in OPA authorization, several other methods of authentication and identity verification (e.g. Kubernetes token validation, API key-based authentication, OAuth token introspection, OIDC-discoverable JWT verification, etc), and features like fetching of external metadata (HTTP services, OIDC userinfo, UMA resource data), token normalization, wristband tokens and dynamic responses. These all can be used independently or combined, in a simple and straightforward Kubernetes-native fashion.
In summary, one might value Authorino when looking for a policy enforcer that offers:
- multiple supported methods and protocols for rather hybrid authentication, encompassing future and legacy auth needs;
- broader expressiveness and more functionalities for the authorization rules;
- authentication and authorization in one single declarative manifest;
- capability to fetch auth metadata from external sources on-the-fly;
- built-in OPA module;
- easy token normalization and/or aiming for Edge Authentication Architecture (EAA).
The good news is that, if you have Istio configured, then you have Envoy and the whole platform for wiring Authorino up if you want to. \ud83d\ude09
Do I have to learn OPA/Rego language to use Authorino? No, you do not. However, if you are comfortable with Rego from Open Policy Agent (OPA), there are some quite interesting things you can do in Authorino, just as you would in any OPA server or OPA plugin, but leveraging Authorino's built-in OPA module instead. Authorino's OPA module is compiled as part of Authorino's code directly from the Golang packages, and imposes no extra latency to the evaluation of your authorization policies. Even the policies themselves are pre-compiled in reconciliation-time, for fast evaluation afterwards, in request-time.
On the other hand, if you do not want to learn Rego or in any case would like to combine it with declarative and Kubernetes-native authN/authZ spec for your services, Authorino does complement OPA with at least two other methods for expressing authorization policies \u2013 i.e. pattern-matching authorization and Kubernetes SubjectAccessReview, the latter allowing to rely completely on the Kubernetes RBAC.
You break down, mix and combine these methods and technolgies in as many authorization policies as you want, potentially applying them according to specific conditions. Authorino will trigger the evaluation of concurrent policies in parallel, aborting the context if any of the processes denies access.
Authorino also packages well-established industry standards and protocols for identity verification (JOSE/JWT validation, OAuth token introspection, Kubernetes TokenReview) and ad-hoc request-time metadata fetching (OIDC userinfo, User-Managed Access (UMA)), and corresponding layers of caching, without which such functionalities would have to be implemented by code.
Can I use Authorino to protect non-REST APIs? Yes, you can. In principle, the API format (REST, gRPC, GraphQL, etc) should not matter for the authN/authZ enforcer. There are a couple points to consider though.
While REST APIs are designed in a way that, in most cases, information usually needed for the evaluation of authorization policies are available in the metadata of the HTTP request (method, path, headers), other API formats quite often will require processing of the HTTP body. By default, Envoy's external authorization HTTP filter will not forward the body of the request to Authorino; to change that, enable the with_request_body
option in the Envoy configuration for the external authorization filter. E.g.:
with_request_body:\n max_request_bytes: 1024\n allow_partial_message: true\n pack_as_bytes: true\n
Additionally, when enabling the request body passed in the payload to Authorino, parsing of the content should be of concern as well. Authorino provides easy access to attributes of the HTTP request, parsed as part of the Authorization JSON, however the body of the request is passed as string and should be parsed by the user according to each case.
Check out Authorino OPA authorization and the Rego Encoding functions for options to parse serialized JSON, YAML and URL-encoded params. For XML transformation, an external parsing service connected via Authorino's HTTP GET/GET-by-POST external metadata might be required.
Can I run Authorino other than on Kubernetes? As of today, no, you cannot, or at least it wouldn't suit production requirements.
Do I have to be admin of the cluster to install Authorino? To install the Authorino Custom Resource Definition (CRD) and to define cluster roles required by the Authorino service, admin privilege to the Kubernetes cluster is required. This step happens only once per cluster and is usually equivalent to installing the Authorino Operator.
Thereafter, deploying instances of the Authorino service and applying AuthConfig
custom resources to a namespace depend on the permissions set by the cluster administrator \u2013 either directly by editing the bindings in the cluster's RBAC, or via options of the operator. In most cases, developers will be granted permissions to create and manage AuthConfig
s, and sometimes to deploy their own instances of Authorino.
Is it OK to store AuthN/AuthZ configs as Kubernetes objects? Authorino's API checks all the bullets to be aggregated to the Kubernetes cluster APIs, and therefore using Custom Resource Definition (CRD) and the Operator pattern has always been an easy design decision.
By merging the definitions of service authN/authZ to the control plane, Authorino AuthConfig
resources can be thought as extensions of the specs of the desired state of services regarding the data flow security. The Authorino custom controllers, built-in into the authorization service, are the agents that read from that desired state and reconcile the processes operating in the data plane.
Authorino is declarative and seamless for developers and cluster administrators managing the state of security of the applications running in the server, used to tools such as kubectl
, the Kubernetes UI and its dashboards. Instead of learning about yet another configuration API format, Authorino users can jump straight to applying and editing YAML or JSON structures they already know, in a way that things such as spec
, status
, namespace
and labels
have the meaning they are expected to have, and docs are as close as kubectl explain
. Moreover, Authorino does not pile up any other redundant layers of APIs, event-processing, RBAC, transformation and validation webhooks, etc. It is Kubernetes in its best.
In terms of scale, Authorino AuthConfig
s should grow proportionally to the number of protected services, virtually limited by nothing but the Kubernetes API data storage, while namespace division and label selectors help adjust horizontally and keep distributed.
In other words, there are lots of benefits of using Kubernetes custom resources and custom controllers, and unless you are planning on bursting your server with more services than it can keep record, it is totally \ud83d\udc4d to store your AuthN/AuthZ configs as cluster API objects.
Can I use Authorino for rate limiting? You can, but you shouldn't. Check out instead Limitador, for simple and efficient global rate limiting. Combine it with Authorino and Authorino's support for Envoy Dynamic Metadata for authenticated rate limiting.
"},{"location":"authorino/#benchmarks","title":"Benchmarks","text":"Configuration of the tests (Authorino features):
Performance test Identity Metadata Authorization Response ReconcileAuthConfig
OIDC/JWT UserInfo, UMA OPA(inline Rego) - AuthPipeline
OIDC/JWT - JSON pattern-matching(JWT claim check) - APIKeyAuthn
API key N/A N/A N/A JSONPatternMatchingAuthz
N/A N/A JSON pattern-matching N/A OPAAuthz
N/A N/A OPA(inline Rego) N/A Platform: linux/amd64 CPU: Intel\u00ae Xeon\u00ae Platinum 8370C 2.80GHz Cores: 1, 4, 10
Results:
ReconcileAuthConfig:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 1.533m \u00b1 2% 264.4Ki \u00b1 0% 6.470k \u00b1 0%\n*-4 1.381m \u00b1 6% 264.5Ki \u00b1 0% 6.471k \u00b1 0%\n*-10 1.563m \u00b1 5% 270.2Ki \u00b1 0% 6.426k \u00b1 0%\ngeomean 1.491m 266.4Ki 6.456k\n\nAuthPipeline:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 388.0\u00b5 \u00b1 2% 80.70Ki \u00b1 0% 894.0 \u00b1 0%\n*-4 348.4\u00b5 \u00b1 5% 80.67Ki \u00b1 2% 894.0 \u00b1 3%\n*-10 356.4\u00b5 \u00b1 2% 78.97Ki \u00b1 0% 860.0 \u00b1 0%\ngeomean 363.9\u00b5 80.11Ki 882.5\n\nAPIKeyAuthn:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 3.246\u00b5 \u00b1 1% 480.0 \u00b1 0% 6.000 \u00b1 0%\n*-4 3.111\u00b5 \u00b1 0% 480.0 \u00b1 0% 6.000 \u00b1 0%\n*-10 3.091\u00b5 \u00b1 1% 480.0 \u00b1 0% 6.000 \u00b1 0%\ngeomean 3.148\u00b5 480.0 6.000\n\nOPAAuthz vs JSONPatternMatchingAuthz:\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 sec/op \u2502 sec/op vs base \u2502\n\n* 87.469\u00b5 \u00b1 1% 1.797\u00b5 \u00b1 1% -97.95% (p=0.000 n=10)\n*-4 95.954\u00b5 \u00b1 3% 1.766\u00b5 \u00b1 0% -98.16% (p=0.000 n=10)\n*-10 96.789\u00b5 \u00b1 4% 1.763\u00b5 \u00b1 0% -98.18% (p=0.000 n=10)\ngeomean 93.31\u00b5 1.775\u00b5 -98.10%\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 B/op \u2502 B/op vs base \u2502\n\n* 28826.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\n*-4 28844.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\n*-10 28862.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\ngeomean 28.17Ki 64.00 -99.78%\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 allocs/op \u2502 allocs/op vs base \u2502\n\n* 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\n*-4 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\n*-10 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\ngeomean 569.0 2.000 -99.65%\n
"},{"location":"authorino/#contributing","title":"Contributing","text":"If you are interested in contributing to Authorino, please refer to the Developer's guide for info about the stack and requirements, workflow, policies and Code of Conduct.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"authorino/docs/","title":"Documentation","text":""},{"location":"authorino/docs/#getting-started","title":"Getting started","text":""},{"location":"authorino/docs/#terminology","title":"Terminology","text":""},{"location":"authorino/docs/#architecture","title":"Architecture","text":""},{"location":"authorino/docs/#feature-description","title":"Feature description","text":""},{"location":"authorino/docs/#user-guides","title":"User guides","text":""},{"location":"authorino/docs/#developers-guide","title":"Developer\u2019s guide","text":""},{"location":"authorino/docs/architecture/","title":"Architecture","text":""},{"location":"authorino/docs/architecture/#overview","title":"Overview","text":"There are a few concepts to understand Authorino's architecture. The main components are: Authorino, Envoy and the Upstream service to be protected. Envoy proxies requests to the configured virtual host upstream service, first contacting with Authorino to decide on authN/authZ.
The topology can vary from centralized proxy and centralized authorization service, to dedicated sidecars, with the nuances in between. Read more about the topologies in the Topologies section below.
Authorino is deployed using the Authorino Operator, from an Authorino
Kubernetes custom resource. Then, from another kind of custom resource, the AuthConfig
CRs, each Authorino instance reads and adds to the index the exact rules of authN/authZ to enforce for each protected host (\"index reconciliation\").
Everything that the AuthConfig reconciler can fetch in reconciliation-time is stored in the index. This is the case of static parameters such as signing keys, authentication secrets and authorization policies from external policy registries.
AuthConfig
s can refer to identity providers (IdP) and trusted auth servers whose access tokens will be accepted to authenticate to the protected host. Consumers obtain an authentication token (short-lived access token or long-lived API key) and send those in the requests to the protected service.
When Authorino is triggered by Envoy via the gRPC interface, it starts evaluating the Auth Pipeline, i.e. it applies to the request the parameters to verify the identity and to enforce authorization, as found in the index for the requested host (See host lookup for details).
Apart from static rules, these parameters can include instructions to contact online with external identity verifiers, external sources of metadata and policy decision points (PDPs).
On every request, Authorino's \"working memory\" is called Authorization JSON, a data structure that holds information about the context (the HTTP request) and objects from each phase of the auth pipeline: i.e., authentication verification (phase i), ad-hoc metadata fetching (phase ii), authorization policy enforcement (phase iii), dynamic response (phase iv), and callbacks (phase v). The evaluators in each of these phases can both read and write from the Authorization JSON for dynamic steps and decisions of authN/authZ.
"},{"location":"authorino/docs/architecture/#topologies","title":"Topologies","text":"Typically, upstream APIs are deployed to the same Kubernetes cluster and namespace where the Envoy proxy and Authorino is running (although not necessarily). Whatever is the case, Envoy must be proxying to the upstream API (see Envoy's HTTP route components and virtual hosts) and pointing to Authorino in the external authorization filter.
This can be achieved with different topologies:
- Envoy can be a centralized gateway with one dedicated instance of Authorino, proxying to one or more upstream services
- Envoy can be deployed as a sidecar of each protected service, but still contacting from a centralized Authorino authorization service
- Both Envoy and Authorino deployed as sidecars of the protected service, restricting all communication between them to localhost
Each topology above induces different measures for security.
"},{"location":"authorino/docs/architecture/#centralized-gateway","title":"Centralized gateway","text":"Recommended in the protected services to validate the origin of the traffic. It must have been proxied by Envoy. See Authorino JSON injection for an extra validation option using a shared secret passed in HTTP header.
"},{"location":"authorino/docs/architecture/#centralized-authorization-service","title":"Centralized authorization service","text":"Protected service should only listen on localhost
and all traffic can be considered safe.
"},{"location":"authorino/docs/architecture/#sidecars","title":"Sidecars","text":"Recommended namespaced
instances of Authorino with fine-grained label selectors to avoid unnecessary caching of AuthConfig
s.
Apart from that, protected service should only listen on localhost
and all traffic can be considered safe.
"},{"location":"authorino/docs/architecture/#cluster-wide-vs-namespaced-instances","title":"Cluster-wide vs. Namespaced instances","text":"Authorino instances can run in either cluster-wide or namespaced mode.
Namespace-scoped instances only watch resources (AuthConfig
s and Secret
s) created in a given namespace. This deployment mode does not require admin privileges over the Kubernetes cluster to deploy the instance of the service (given Authorino's CRDs have been installed beforehand, such as when Authorino is installed using the Authorino Operator).
Cluster-wide deployment mode, in contraposition, deploys instances of Authorino that watch resources across the entire cluster, consolidating all resources into a multi-namespace index of auth configs. Admin privileges over the Kubernetes cluster is required to deploy Authorino in cluster-wide mode.
Be careful to avoid superposition when combining multiple Authorino instances and instance modes in the same Kubernetes cluster. Apart from caching unnecessary auth config data in the instances depending on your routing settings, the leaders of each instance (set of replicas) may compete for updating the status of the custom resources that are reconciled. See Resource reconciliation and status update for more information.
If necessary, use label selectors to narrow down the space of resources watched and reconciled by each Authorino instance. Check out the Sharding section below for details.
"},{"location":"authorino/docs/architecture/#the-authorino-authconfig-custom-resource-definition-crd","title":"The Authorino AuthConfig
Custom Resource Definition (CRD)","text":"The desired protection for a service is declaratively stated by applying an AuthConfig
Custom Resource to the Kubernetes cluster running Authorino.
An AuthConfig
resource typically looks like the following:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n # The list of public host names of the services protected by this AuthConfig resource.\n # Authorino uses the host name provided in the payload of external authorization request to lookup for the corresponding AuthConfig to enforce.\n # Hostname collisions are prevented by rejecting to index a hostname already taken by another AuthConfig.\n # Format: hostname[:port]\n hosts:\n\n - my-api.io:443 # north-south traffic\n - my-api.ns.svc.cluster.local # east-west traffic\n\n # Set of stored named patterns to be reused in conditions and pattern-matching authorization rules\n patterns: {\"name\" \u2192 {selector, operator, value}, \u2026}\n\n # Top-level conditions for the AuthConfig to be enforced.\n # If omitted, the AuthConfig will be enforced at all requests.\n # If present, all conditions must match for the AuthConfig to be enforced; otherwise, Authorino skips the AuthConfig and returns to the auth request with status OK.\n when: [{selector, operator, value | named pattern ref}, \u2026]\n\n # List of one or more trusted sources of identity:\n # - Configurations to verify JSON Web Tokens (JWTs) issued by an OpenID Connect (OIDC) server\n # - Endpoints for OAuth 2.0 token introspection\n # - Attributes for the Kubernetes `TokenReview` API\n # - Label selectors for API keys (stored in Kubernetes `Secret`s)\n # - Label selectors trusted x509 issuer certificates (stored in Kubernetes `Secret`s)\n # - Selectors for plain identity objects supplied in the payload of the authorization request\n # - Anonymous access configs\n authentication: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of sources of external metadata for the authorization (optional):\n # - Endpoints for HTTP GET or GET-by-POST requests\n # - OIDC UserInfo endpoints (associated with an OIDC token issuer specified in the authentication configs)\n # - User-Managed Access (UMA) resource registries\n metadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of authorization policies to be enforced (optional):\n # - Pattern-matching rules (e.g. `context.request.http.path eq '/pets'`)\n # - Open Policy Agent (OPA) inline or external Rego policies\n # - Attributes for the Kubernetes `SubjectAccessReview` API\n # \u2013 Attributes for authorization with an external SpiceDB server\n authorization: {\"name\" \u2192 {\u2026}, \u2026}\n\n # Customization to the response to the external authorization request (optional)\n response:\n # List of dynamic response elements into the request on success authoization (optional):\n # - Plain text\n # - JSON objects\n # - Festival Wristbands (signed JWTs issued by Authorino)\n success:\n # List of HTTP headers to inject into the request post-authorization (optional):\n headers: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of Envoy Dynamic Metadata to inject into the request post-authorization (optional):\n dynamicMetadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n # Custom HTTP status code, message and headers to replace the default `401 Unauthorized` response (optional)\n unauthenticated:\n code: 302\n message: Redirecting to login\n headers:\n \"Location\":\n value: https://my-app.io/login\n\n # Custom HTTP status code, message and headers to replace the default `and `403 Forbidden` response (optional)\n unauthorized: {code, message, headers, body}\n\n # List of callback targets:\n # - Endpoints for HTTP requests\n callbacks: {\"name\" \u2192 {\u2026}, \u2026}\n
Check out the OAS of the AuthConfig
CRD for a formal specification of the options for authentication
verification, external metadata
fetching, authorization
policies, and dynamic response
, as well as any other host protection capability implemented by Authorino.
You can also read the specification from the CLI using the kubectl explain
command. The Authorino CRD is required to have been installed in Kubernetes cluster. E.g. kubectl explain authconfigs.spec.authentication.overrides
.
A complete description of supported features and corresponding configuration options within an AuthConfig
CR can be found in the Features page.
More concrete examples of AuthConfig
s for specific use-cases can be found in the User guides.
"},{"location":"authorino/docs/architecture/#resource-reconciliation-and-status-update","title":"Resource reconciliation and status update","text":"The instances of the Authorino authorization service workload, following the Operator pattern, watch events related to the AuthConfig
custom resources, to build and reconcile an in-memory index of configs. Whenever a replica receives traffic for authorization request, it looks up in the index of AuthConfig
s and then triggers the \"Auth Pipeline\", i.e. enforces the associated auth spec onto the request.
An instance can be a single authorization service workload or a set of replicas. All replicas watch and reconcile the same set of resources that match the --auth-config-label-selector
and --secret-label-selector
configuration options. (See both Cluster-wide vs. Namespaced instances and Sharding, for details about defining the reconciliation space of Authorino instances.)
The above means that all replicas of an Authorino instance should be able to receive traffic for authorization requests.
Among the multiple replicas of an instance, Authorino elects one replica to be leader. The leader is responsible for updating the status of reconciled AuthConfig
s. If the leader eventually becomes unavailable, the instance will automatically elect another replica take its place as the new leader.
The status of an AuthConfig
tells whether the resource is \"ready\" (i.e. indexed). It also includes summary information regarding the numbers of authentication configs, metadata configs, authorization configs and response configs within the spec, as well as whether Festival Wristband tokens are being issued by the Authorino instance as by spec.
Apart from watching events related to AuthConfig
custom resources, Authorino also watches events related to Kubernetes Secret
s, as part of Authorino's API key authentication feature. Secret
resources that store API keys are linked to their corresponding AuthConfig
s in the index. Whenever the Authorino instance detects a change in the set of API key Secret
s linked to an AuthConfig
s, the instance reconciles the index.
Authorino only watches events related to Secret
s whose metadata.labels
match the label selector --secret-label-selector
of the Authorino instance. The default values of the label selector for Kubernetes Secret
s representing Authorino API keys is authorino.kuadrant.io/managed-by=authorino
.
"},{"location":"authorino/docs/architecture/#the-auth-pipeline-aka-enforcing-protection-in-request-time","title":"The \"Auth Pipeline\" (aka: enforcing protection in request-time)","text":"In each request to the protected API, Authorino triggers the so-called \"Auth Pipeline\", a set of configured evaluators that are organized in a 5-phase pipeline:
- (i) Authentication phase: at least one source of identity (i.e., one authentication config) must resolve the supplied credential in the request into a valid identity or Authorino will otherwise reject the request as unauthenticated (401 HTTP response status).
- (ii) Metadata phase: optional fetching of additional data from external sources, to add up to context and identity information, and used in authorization policies, dynamic responses and callback requests (phases iii to v).
- (iii) Authorization phase: all unskipped policies must evaluate to a positive result (\"authorized\"), or Authorino will otherwise reject the request as unauthorized (403 HTTP response code).
- (iv) Response phase \u2013 Authorino builds all user-defined response items (dynamic JSON objects and/or Festival Wristband OIDC tokens), which are supplied back to the external authorization client within added HTTP headers or as Envoy Dynamic Metadata
- (v) Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints.
Each phase is sequential to the other, from (i) to (v), while the evaluators within each phase are triggered concurrently or as prioritized. The Authentication phase (i) is the only one required to list at least one evaluator (i.e. 1+ authentication configs); Metadata, Authorization and Response phases can have any number of evaluators (including zero, and even be omitted in this case).
"},{"location":"authorino/docs/architecture/#host-lookup","title":"Host lookup","text":"Authorino reads the request host from Attributes.Http.Host
of Envoy's CheckRequest
type, and uses it as key to lookup in the index of AuthConfig
s, matched against spec.hosts
.
Alternatively to Attributes.Http.Host
, a host
entry can be supplied in the Attributes.ContextExtensions
map of the external authorino request. This will take precedence before the host attribute of the HTTP request.
The host
context extension is useful to support use cases such as of path prefix-based lookup and wildcard subdomains lookup with lookup strongly dictated by the external authorization client (e.g. Envoy), which often knows about routing and the expected AuthConfig
to enforce beyond what Authorino can infer strictly based on the host name.
Wildcards can also be used in the host names specified in the AuthConfig
, resolved by Authorino. E.g. if *.pets.com
is in spec.hosts
, Authorino will match the concrete host names dogs.pets.com
, cats.pets.com
, etc. In case, of multiple possible matches, Authorino will try the longest match first (in terms of host name labels) and fall back to the closest wildcard upwards in the domain tree (if any).
When more than one host name is specified in the AuthConfig
, all of them can be used as key, i.e. all of them can be requested in the authorization request and will be mapped to the same config.
Example. Host lookup with wildcards.
The domain tree above induces the following relation:
foo.nip.io
\u2192 authconfig-1
(matches *.io
) talker-api.nip.io
\u2192 authconfig-2
(matches talker-api.nip.io
) dogs.pets.com
\u2192 authconfig-2
(matches *.pets.com
) api.acme.com
\u2192 authconfig-3
(matches api.acme.com
) www.acme.com
\u2192 authconfig-4
(matches *.acme.com
) foo.org
\u2192 404 Not found
The host can include the port number (i.e. hostname:port
) or it can be just the name of the host name. Authorino will first try finding in the index a config associated to hostname:port
, as supplied in the authorization request; if the index misses an entry for hostname:port
, Authorino will then remove the :port
suffix and repeat the lookup using just hostname
as key. This provides implicit support for multiple port numbers for a same host without having to list all combinations in the AuthConfig
.
"},{"location":"authorino/docs/architecture/#avoiding-host-name-collision","title":"Avoiding host name collision","text":"Authorino tries to prevent host name collision between AuthConfig
s by rejecting to link in the index any AuthConfig
and host name if the host name is already linked to a different AuthConfig
in the index. This was intentionally designed to prevent users from superseding each other's AuthConfig
s, partially or fully, by just picking the same host names or overlapping host names as others.
When wildcards are involved, a host name that matches a host wildcard already linked in the index to another AuthConfig
will be considered taken, and therefore the newest AuthConfig
will be rejected to be linked to that host.
This behavior can be disabled to allow AuthConfig
s to partially supersede each others' host names (limited to strict host subsets), by supplying the --allow-superseding-host-subsets
command-line flag when running the Authorino instance.
"},{"location":"authorino/docs/architecture/#the-authorization-json","title":"The Authorization JSON","text":"On every Auth Pipeline, Authorino builds the Authorization JSON, a \"working-memory\" data structure composed of context
(information about the request, as supplied by the Envoy proxy to Authorino) and auth
(objects resolved in phases (i) to (v) of the pipeline). The evaluators of each phase can read from the Authorization JSON and implement dynamic properties and decisions based on its values.
At phase (iii), the authorization evaluators count on an Authorization JSON payload that looks like the following:
// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n \"context\": { // the input from the proxy\n \"origin\": {\u2026},\n \"request\": {\n \"http\": {\n \"method\": \"\u2026\",\n \"headers\": {\u2026},\n \"path\": \"/\u2026\",\n \"host\": \"\u2026\",\n \u2026\n }\n }\n },\n \"auth\": {\n \"identity\": {\n // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n },\n \"metadata\": {\n // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n }\n }\n}\n
The policies evaluated can use any data from the authorization JSON to define authorization rules.
After phase (iii), Authorino appends to the authorization JSON the results of this phase as well, and the payload available for phase (iv) becomes:
// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n \"context\": { // the input from the proxy\n \"origin\": {\u2026},\n \"request\": {\n \"http\": {\n \"method\": \"\u2026\",\n \"headers\": {\u2026},\n \"path\": \"/\u2026\",\n \"host\": \"\u2026\",\n \u2026\n }\n }\n },\n \"auth\": {\n \"identity\": {\n // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n },\n \"metadata\": {\n // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n },\n \"authorization\": {\n // each authorization policy result resolved by the evaluators of phase (iii), by name of the evaluator\n }\n }\n}\n
Festival Wristbands and Dynamic JSON responses can include dynamic values (custom claims/properties) fetched from the authorization JSON. These can be returned to the external authorization client in added HTTP headers or as Envoy Well Known Dynamic Metadata. Check out Custom response features for details.
For information about reading and fetching data from the Authorization JSON (syntax, functions, etc), check out JSON paths.
"},{"location":"authorino/docs/architecture/#raw-http-authorization-interface","title":"Raw HTTP Authorization interface","text":"Besides providing the gRPC authorization interface \u2013 that implements the Envoy gRPC authorization server \u2013, Authorino also provides another interface for raw HTTP authorization. This second interface responds to GET
and POST
HTTP requests sent to :5001/check
, and is suitable for other forms of integration, such as:
- using Authorino as Kubernetes ValidatingWebhook service (example);
- other HTTP proxies and API gateways;
- old versions of Envoy incompatible with the latest version of gRPC external authorization protocol (Authorino is based on v3.19.1 of Envoy external authorization API)
In the raw HTTP interface, the host used to lookup for an AuthConfig
must be supplied in the Host
HTTP header of the request. Other attributes of the HTTP request are also passed in the context to evaluate the AuthConfig
, including the body of the request.
"},{"location":"authorino/docs/architecture/#caching","title":"Caching","text":""},{"location":"authorino/docs/architecture/#openid-connect-and-user-managed-access-configs","title":"OpenID Connect and User-Managed Access configs","text":"OpenID Connect and User-Managed Access configurations, discovered usually at reconciliation-time from well-known discovery endpoints.
Cached individual OpenID Connect configurations discovered by Authorino can be configured to be auto-refreshed, by setting the corresponding spec.authentication.jwt.ttl
field in the AuthConfig (given in seconds, default: 0
\u2013 i.e. no cache update).
"},{"location":"authorino/docs/architecture/#json-web-keys-jwks-and-json-web-key-sets-jwks","title":"JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS)","text":"JSON signature verification certificates linked by discovered OpenID Connect configurations, fetched usually at reconciliation-time.
"},{"location":"authorino/docs/architecture/#revoked-access-tokens","title":"Revoked access tokens","text":"Not implemented - In analysis (#19) Caching of access tokens identified and or notified as revoked prior to expiration.
"},{"location":"authorino/docs/architecture/#external-metadata","title":"External metadata","text":"Not implemented - Planned (#21) Caching of resource data obtained in previous requests.
"},{"location":"authorino/docs/architecture/#compiled-rego-policies","title":"Compiled Rego policies","text":"Performed automatically by Authorino at reconciliation-time for the authorization policies based on the built-in OPA module.
Precompiled and cached individual Rego policies originally pulled by Authorino from external registries can be configured to be auto-refreshed, by setting the corresponding spec.authorization.opa.externalRegistry.ttl
field in the AuthConfig (given in seconds, default: 0
\u2013 i.e. no cache update).
"},{"location":"authorino/docs/architecture/#repeated-requests","title":"Repeated requests","text":"Not implemented - In analysis (#20) For consecutive requests performed, within a given period of time, by a same user that request for a same resource, such that the result of the auth pipeline can be proven that would not change.
"},{"location":"authorino/docs/architecture/#sharding","title":"Sharding","text":"By default, Authorino instances will watch AuthConfig
CRs in the entire space (namespace or entire cluster; see Cluster-wide vs. Namespaced instances for details). To support combining multiple Authorino instances and instance modes in the same Kubernetes cluster while avoiding overlap between the instances (i.e. multiple instances reconciling the same AuthConfig
s), Authorino offers support for data sharding, i.e. to horizontally narrow down the space of reconciliation of an Authorino instance to a subset of that space.
The benefits of limiting the space of reconciliation of an Authorino instance include avoiding unnecessary caching and workload in instances that do not receive corresponding traffic (according to your routing settings) and preventing leaders of multiple instances (sets of replicas) from competing on resource status updates (see Resource reconciliation and status update for details).
Use-cases for sharding of AuthConfig
s:
- Horizontal load balancing of traffic of authorization requests
- Support for managed centralized instances of Authorino offered to API owners who create and maintain their own
AuthConfig
s within their own user namespaces.
Authorino's custom controllers filter the AuthConfig
-related events to be reconciled using Kubernetes label selectors, defined for the Authorino instance via the --auth-config-label-selector
command-line flag. By default, --auth-config-label-selector
is empty, meaning all AuthConfig
s in the space are watched; the flag can be set to any value parseable as a valid label selector, causing Authorino to watch only events of AuthConfig
s whose metadata.labels
match the selector.
The following are all valid examples of AuthConfig
label selector filters:
--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino,other-label=other-value\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by in (authorino,kuadrant)\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by!=authorino-v0.4\"\n--auth-config-label-selector=\"!disabled\"\n
"},{"location":"authorino/docs/architecture/#rbac","title":"RBAC","text":"The table below describes the roles and role bindings defined by the Authorino service:
Role | Kind | Scope(*) | Description | Permissions
--- | --- | --- | --- | ---
authorino-manager-role | ClusterRole | C/N | Role of the Authorino manager service | Watch and reconcile AuthConfigs and Secrets
authorino-manager-k8s-auth-role | ClusterRole | C/N | Role for the Kubernetes auth features | Create TokenReviews and SubjectAccessReviews (Kubernetes auth)
authorino-leader-election-role | Role | N | Leader election role | Create/update the ConfigMap used to coordinate which replica of Authorino is the leader
authorino-authconfig-editor-role | ClusterRole | - | AuthConfig editor | R/W AuthConfigs; read AuthConfig/status
authorino-authconfig-viewer-role | ClusterRole | - | AuthConfig viewer | Read AuthConfigs and AuthConfig/status
authorino-proxy-role | ClusterRole | C/N | Role of the kube-rbac-proxy sidecar | Create TokenReviews and SubjectAccessReviews to check permissions to the /metrics endpoint
authorino-metrics-reader | ClusterRole | - | Metrics reader | GET /metrics
(*) C - Cluster-wide | N - Authorino namespace | C/N - Cluster-wide or Authorino namespace (depending on the deployment mode).
"},{"location":"authorino/docs/architecture/#observability","title":"Observability","text":"Please refer to the Observability user guide for info on Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.
"},{"location":"authorino/docs/code_of_conduct/","title":"Code of conduct","text":""},{"location":"authorino/docs/code_of_conduct/#code-of-conduct","title":"Code of Conduct","text":"Autorino follows the Kuadrant Community Code of Conduct, which is based on the CNCF Code of Conduct.
Please refer to this page for a description of the standards and values we stand for in our relationship with the community.
"},{"location":"authorino/docs/contributing/","title":"Developer's Guide","text":""},{"location":"authorino/docs/contributing/#technology-stack-for-developers","title":"Technology stack for developers","text":"Minimum requirements to contribute to Authorino are:
- Golang v1.21+
- Docker
Authorino's code was originally bundled using the Operator SDK (v1.9.0).
The following tools can be installed as part of the development workflow:
-
Installed with go install
to the $PROJECT_DIR/bin
directory:
- controller-gen: for building custom types and manifests
- Kustomize: for assembling flavoured manifests and installing/deploying
- setup-envtest: for running the tests \u2013 extra tools installed to
./testbin
- benchstat: for human-friendly test benchmark reports
- mockgen: to generate mocks for tests \u2013 e.g.
./bin/mockgen -source=pkg/auth/auth.go -destination=pkg/auth/mocks/mock_auth.go
- Kind: for deploying a containerized Kubernetes cluster for integration testing purposes
-
Other recommended tools to have installed:
- jq
- yq
- gnu-sed
"},{"location":"authorino/docs/contributing/#workflow","title":"Workflow","text":""},{"location":"authorino/docs/contributing/#check-the-issues","title":"Check the issues","text":"Start by checking the list of issues in GitHub.
If you want to contribute an idea for an enhancement, a bug fix, or a question, please make sure to describe it in an issue, so we can start a conversation together and help you find the best way to get your contribution merged.
"},{"location":"authorino/docs/contributing/#clone-the-repo-and-setup-the-local-environment","title":"Clone the repo and setup the local environment","text":"Fork/clone the repo:
git clone git@github.com:kuadrant/authorino.git && cd authorino\n
Download the Golang dependencies:
make vendor\n
For a list of the additional automation provided, check:
make help\n
"},{"location":"authorino/docs/contributing/#make-your-changes","title":"Make your changes","text":"Good changes...
- follow the Golang conventions
- have proper test coverage
- address corresponding updates to the docs
- help us fix wherever we failed to do the above \ud83d\ude1c
"},{"location":"authorino/docs/contributing/#run-the-tests","title":"Run the tests","text":"To run the tests:
make test\n
"},{"location":"authorino/docs/contributing/#try-locally","title":"Try locally","text":""},{"location":"authorino/docs/contributing/#build-deploy-and-try-authorino-in-a-local-cluster","title":"Build, deploy and try Authorino in a local cluster","text":"The following command will:
- Start a local Kubernetes cluster (using Kind)
- Install cert-manager in the cluster
- Install the Authorino Operator and Authorino CRDs
- Build an image of Authorino based on the current branch
- Push the freshly built image to the cluster's registry
- Generate TLS certificates for the Authorino service
- Deploy an instance of Authorino
- Deploy the example application Talker API, a simple HTTP API that echoes back whatever it gets in the request
- Setup Envoy for proxying to the Talker API and using Authorino for external authorization
make local-setup\n
You will be prompted to edit the Authorino
custom resource.
The main workload composed of Authorino instance and user apps (Envoy, Talker API) will be deployed to the default
Kubernetes namespace.
Once the deployment is ready, you can forward requests on port 8000 to the Envoy service:
kubectl port-forward deployment/envoy 8000:8000 &\n
Pro tips - Change the default workload namespace by supplying the
NAMESPACE
argument to your make local-setup
and other deployment, apps and local cluster related targets. If the namespace does not exist, it will be created. - Disable TLS when deploying locally by supplying
TLS_ENABLED=0
to your make local-setup
and make deploy
commands. E.g. make local-setup TLS_ENABLED=0
. - Skip being prompted to edit the
Authorino
CR and default to an Authorino deployment with TLS enabled, debug/development log level/mode, and standard name 'authorino', by supplying FF=1
to your make local-setup
and make deploy
commands. E.g. make local-setup FF=1
- Supply
DEPLOY_IDPS=1
to make local-setup
and make user-apps
to deploy Keycloak and Dex to the cluster. DEPLOY_KEYCLOAK
and DEPLOY_DEX
are also available. Read more about additional tools for specific use cases in the section below. - Saving the ID of the process (PID) of the port-forward command spawned in the background can be useful to later kill and restart the process. E.g.
kubectl port-forward deployment/envoy 8000:8000 &;PID=$!
; then kill $PID
.
"},{"location":"authorino/docs/contributing/#additional-tools-for-specific-use-cases","title":"Additional tools (for specific use-cases)","text":"Limitador To deploy Limitador \u2013 pre-configured in Envoy for rate-limiting the Talker API to 5 hits per minute per user_id
when available in the cluster workload \u2013 run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
Keycloak Authorino examples include a bundle of Keycloak preloaded with the following realm setup:
- Admin console: http://localhost:8080/admin (admin/p)
- Preloaded realm: kuadrant
- Preloaded clients:
- demo: to which API consumers delegate access and therefore the one which access tokens are issued to
- authorino: used by Authorino to fetch additional user info with
client_credentials
grant type - talker-api: used by Authorino to fetch UMA-protected resource data associated with the Talker API
- Preloaded resources:
/hello
/greetings/1
(owned by user john) /greetings/2
(owned by user jane) /goodbye
- Realm roles:
- member (default to all users)
- admin
- Preloaded users:
- john/p (member)
- jane/p (admin)
- peter/p (member, email not verified)
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Forward local requests to the instance of Keycloak running in the cluster:
kubectl port-forward deployment/keycloak 8080:8080 &\n
Dex Authorino examples include a bundle of Dex preloaded with the following setup:
- Preloaded clients:
- demo: to which API consumers delegate access and therefore the one which access tokens are issued to (Client secret: aaf88e0e-d41d-4325-a068-57c4b0d61d8e)
- Preloaded users:
- marta@localhost/password
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/dex/dex-deploy.yaml\n
Forward local requests to the instance of Dex running in the cluster:
kubectl port-forward deployment/dex 5556:5556 &\n
a12n-server Authorino examples include a bundle of a12n-server and corresponding MySQL database, preloaded with the following setup:
- Admin console: http://a12n-server:8531 (admin/123456)
- Preloaded clients:
- service-account-1: to obtain access tokens via
client_credentials
OAuth2 grant type, to consume the Talker API (Client secret: DbgXROi3uhWYCxNUq_U1ZXjGfLHOIM8X3C2bJLpeEdE); includes metadata privilege: { \"talker-api\": [\"read\"] }
that can be used to write authorization policies - talker-api: to authenticate to the token introspect endpoint (Client secret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g)
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n
Forward local requests to the instance of a12n-server running in the cluster:
kubectl port-forward deployment/a12n-server 8531:8531 &\n
"},{"location":"authorino/docs/contributing/#re-build-and-rollout-latest","title":"Re-build and rollout latest","text":"Re-build and rollout latest Authorino image:
make local-rollout\n
If you made changes to the CRD between iterations, re-install by running:
make install\n
"},{"location":"authorino/docs/contributing/#clean-up","title":"Clean-up","text":"The following command deletes the entire Kubernetes cluster started with Kind:
make local-cleanup\n
"},{"location":"authorino/docs/contributing/#sign-your-commits","title":"Sign your commits","text":"All commits to be accepted to Authorino's code are required to be signed. Refer to this page about signing your commits.
"},{"location":"authorino/docs/contributing/#logging-policy","title":"Logging policy","text":"A few guidelines for adding logging messages in your code:
- Make sure you understand Authorino's Logging architecture and policy regarding log levels, log modes, tracing IDs, etc.
- Respect controller-runtime's Logging Guidelines.
- Do not add sensitive data to your
info
log messages; instead, redact all sensitive data in your log messages or use debug
log level by mutating the logger with V(1)
before outputting the message.
"},{"location":"authorino/docs/contributing/#additional-resources","title":"Additional resources","text":"Here in the repo:
- Getting started
- Terminology
- Architecture
- Feature description
Other repos:
- Authorino Operator
- Authorino examples
"},{"location":"authorino/docs/contributing/#reach-out","title":"Reach out","text":"#kuadrant channel on kubernetes.slack.com.
"},{"location":"authorino/docs/features/","title":"Features","text":""},{"location":"authorino/docs/features/#overview","title":"Overview","text":"We call features of Authorino the different things one can do to enforce identity verification & authentication and authorization on requests to protected services. These can be a specific identity verification method based on a supported authentication protocol, or a method to fetch additional auth metadata in request-time, etc.
Most features of Authorino relate to the different phases of the Auth Pipeline and therefore are configured in the Authorino AuthConfig
. An identity verification/authentication feature usually refers to a functionality of Authorino such as the API key-based authentication, the validation of JWTs/OIDC ID tokens, and authentication based on Kubernetes TokenReviews. Analogously, OPA, pattern-matching and Kubernetes SubjectAccessReview are examples of authorization features of Authorino.
At a deeper level, a feature can also be an additional functionality within a bigger feature, usually applicable to the whole class the bigger feature belongs to. For instance, the configuration of how auth credentials are expected to be carried in the request, which is broadly available for any identity verification method. Other examples are: Identity extension and Priorities.
A full specification of all features of Authorino that can be configured in an AuthConfig
can be found in the official spec of the custom resource definition.
You can also learn about Authorino features by using the kubectl explain
command in a Kubernetes cluster where the Authorino CRD has been installed. E.g. kubectl explain authconfigs.spec.authentication.credentials
.
"},{"location":"authorino/docs/features/#common-feature-json-paths-selector","title":"Common feature: JSON paths (selector
)","text":"The first feature of Authorino to learn about is a common functionality used in the specification of many other features. JSON paths are selectors of data from the Authorization JSON used in parts of an AuthConfig for referring to dynamic values of each authorization request.
Usage examples of JSON paths are: dynamic URLs and request parameters when fetching metadata from external sources, dynamic authorization policy rules, and dynamic authorization response attributes (e.g. injected HTTP headers, Festival Wristband token claims, etc).
"},{"location":"authorino/docs/features/#syntax","title":"Syntax","text":"The syntax to fetch data from the Authorization JSON with JSON paths is based on GJSON. Refer to GJSON Path Syntax page for more information.
"},{"location":"authorino/docs/features/#string-modifiers","title":"String modifiers","text":"On top of GJSON, Authorino defines a few string modifiers.
Examples below provided for the following Authorization JSON:
{\n \"context\": {\n \"request\": {\n \"http\": {\n \"path\": \"/pets/123\",\n \"headers\": {\n \"authorization\": \"Basic amFuZTpzZWNyZXQK\" // jane:secret\n \"baggage\": \"eyJrZXkxIjoidmFsdWUxIn0=\" // {\"key1\":\"value1\"}\n }\n }\n }\n },\n \"auth\": {\n \"identity\": {\n \"username\": \"jane\",\n \"fullname\": \"Jane Smith\",\n \"email\": \"\\u0006jane\\u0012@petcorp.com\\n\"\n },\n },\n}\n
@strip
Strips out any non-printable characters such as carriage return. E.g. auth.identity.email.@strip
\u2192 \"jane@petcorp.com\"
.
@case:upper|lower
Changes the case of a string. E.g. auth.identity.username.@case:upper
\u2192 \"JANE\"
.
@replace:{\"old\":string,\"new\":string}
Replaces a substring within a string. E.g. auth.identity.username.@replace:{\"old\":\"Smith\",\"new\":\"Doe\"}
\u2192 \"Jane Doe\"
.
@extract:{\"sep\":string,\"pos\":int}
Splits a string at occurrences of a separator (default: \" \"
) and selects the substring at the pos
-th position (default: 0
). E.g. context.request.path.@extract:{\"sep\":\"/\",\"pos\":2}
\u2192 123
.
@base64:encode|decode
base64-encodes or decodes a string value. E.g. auth.identity.username.decoded.@base64:encode
\u2192 \"amFuZQo=\"
.
In combination with @extract
, @base64
can be used to extract the username in an HTTP Basic Authentication request. E.g. context.request.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\",\"pos\":1}
\u2192 \"jane\"
.
"},{"location":"authorino/docs/features/#interpolation","title":"Interpolation","text":"JSON paths can be interpolated into strings to build template-like dynamic values. E.g. \"Hello, {auth.identity.name}!\"
.
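For instance, a response header greeting the user could be built by interpolation (a sketch; x-greeting is an arbitrary header name, and the plain custom response method is described later in this page):
response:\n success:\n headers:\n \"x-greeting\":\n plain:\n selector: \"Hello, {auth.identity.username}!\"\n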
"},{"location":"authorino/docs/features/#identity-verification-authentication-features-authentication","title":"Identity verification & authentication features (authentication
)","text":""},{"location":"authorino/docs/features/#api-key-authenticationapikey","title":"API key (authentication.apiKey
)","text":"Authorino relies on Kubernetes Secret
resources to represent API keys.
To define an API key, create a Secret
in the cluster containing an api_key
entry that holds the value of the API key.
API key secrets must be created in the same namespace of the AuthConfig
(default) or spec.authentication.apiKey.allNamespaces
must be set to true
(only works with cluster-wide Authorino instances).
API key secrets must be labeled with the labels that match the selectors specified in spec.authentication.apiKey.selector
in the AuthConfig
.
Whenever an AuthConfig
is indexed, Authorino will also index all matching API key secrets. In order for Authorino to also watch events related to API key secrets individually (e.g. new Secret
created, updates, deletion/revocation), Secret
s must also include a label that matches Authorino's bootstrap configuration --secret-label-selector
(default: authorino.kuadrant.io/managed-by=authorino
). This label may or may not be present to spec.authentication.apiKey.selector
in the AuthConfig
without implications for the caching of the API keys when triggered by the reconciliation of the AuthConfig
; however, if not present, individual changes related to the API key secret (i.e. without touching the AuthConfig
) will be ignored by the reconciler.
Example. For the following AuthConfig
:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\n namespace: authorino-system\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels: # the key-value set used to select the matching `Secret`s; resources including these labels will be accepted as valid API keys to authenticate to this service\n group: friends # some custom label\n allNamespaces: true # only works with cluster-wide Authorino instances; otherwise, create the API key secrets in the same namespace of the AuthConfig\n
The following Kubernetes Secret
represents a valid API key:
apiVersion: v1\nkind: Secret\nmetadata:\n name: user-1-api-key-1\n namespace: default\n labels:\n authorino.kuadrant.io/managed-by: authorino # so the Authorino controller reconciles events related to this secret\n group: friends\nstringData:\n api_key: <some-randomly-generated-api-key-value>\ntype: Opaque\n
The resolved identity object, added to the authorization JSON following an API key identity source evaluation, is the Kubernetes Secret
resource (as JSON).
"},{"location":"authorino/docs/features/#kubernetes-tokenreview-authenticationkubernetestokenreview","title":"Kubernetes TokenReview (authentication.kubernetesTokenReview
)","text":"Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).
These tokens can be either ServiceAccount
tokens such as the ones issued by kubelet as part of Kubernetes Service Account Token Volume Projection, or any valid user access tokens issued to users of the Kubernetes server API.
The list of audiences
of the token must include the requested host and port of the protected API (default), or all audiences specified in the Authorino AuthConfig
custom resource. For example:
For the following AuthConfig
CR, the Kubernetes token must include the audience my-api.io
:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"cluster-users\":\n kubernetesTokenReview: {}\n
Whereas for the following AuthConfig
CR, the Kubernetes token audiences must include foo and bar:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"cluster-users\":\n kubernetesTokenReview:\n audiences:\n - foo\n - bar\n
The resolved identity object added to the authorization JSON following a successful Kubernetes authentication identity evaluation is the status
field of TokenReview response (see TokenReviewStatus for reference).
"},{"location":"authorino/docs/features/#jwt-verification-authenticationjwt","title":"JWT verification (authentication.jwt
)","text":"In reconciliation-time, using OpenID Connect Discovery well-known endpoint, Authorino automatically discovers and caches OpenID Connect configurations and associated JSON Web Key Sets (JWKS) for all OpenID Connect issuers declared in an AuthConfig
. Then, in request-time, Authorino verifies the JSON Web Signature (JWS) and check the time validity of signed JSON Web Tokens (JWT) supplied on each request.
Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.
The kid
claim stated in the JWT header must match one of the keys cached by Authorino during OpenID Connect Discovery, therefore supporting JWK rotation.
The decoded payload of the validated JWT is appended to the authorization JSON as the resolved identity.
OpenID Connect configurations and linked JSON Web Key Sets can be configured to be automatically refreshed (pull again from the OpenID Connect Discovery well-known endpoints), by setting the authentication.jwt.ttl
field (given in seconds, default: 0
\u2013 i.e. auto-refresh disabled).
For an excellent summary of the underlying concepts and standards that relate OpenID Connect and JSON Object Signing and Encryption (JOSE), see this article by Jan Rusnacko. For official specification and RFCs, see OpenID Connect Core, OpenID Connect Discovery, JSON Web Token (JWT) (RFC7519), and JSON Object Signing and Encryption (JOSE).
"},{"location":"authorino/docs/features/#oauth-20-introspection-authenticationoauth2introspection","title":"OAuth 2.0 introspection (authentication.oauth2Introspection
)","text":"For bare OAuth 2.0 implementations, Authorino can perform token introspection on the access tokens supplied in the requests to protected APIs.
Authorino does not implement any of OAuth 2.0 grants for the applications to obtain the token. However, it can verify supplied tokens with the OAuth server, including opaque tokens, as long as the server exposes the token_introspect
endpoint (RFC 7662).
Developers must set the token introspection endpoint in the AuthConfig
, as well as a reference to the Kubernetes secret storing the credentials of the OAuth client to be used by Authorino when requesting the introspect.
The response returned by the OAuth2 server to the token introspection request is the resolved identity appended to the authorization JSON.
"},{"location":"authorino/docs/features/#x509-client-certificate-authentication-authenticationx509","title":"X.509 client certificate authentication (authentication.x509
)","text":"Authorino can verify X.509 certificates presented by clients for authentication on the request to the protected APIs, at application level.
Trusted root Certificate Authorities (CA) are stored in Kubernetes Secrets labeled according to selectors specified in the AuthConfig, watched and indexed by Authorino. Make sure to create proper kubernetes.io/tls
-typed Kubernetes Secrets, containing the public certificates of the CA stored in either a tls.crt
or ca.crt
entry inside the secret.
Trusted root CA secrets must be created in the same namespace of the AuthConfig
(default) or spec.authentication.x509.allNamespaces
must be set to true
(only works with cluster-wide Authorino instances).
Client certificates must include x509 v3 extension specifying 'Client Authentication' extended key usage.
The identity object resolved out of a client x509 certificate is equal to the subject field of the certificate, and it serializes as JSON within the Authorization JSON usually as follows:
{\n \"auth\": {\n \"identity\": {\n \"CommonName\": \"aisha\",\n \"Country\": [\"PK\"],\n \"ExtraNames\": null,\n \"Locality\": [\"Islamabad\"],\n \"Names\": [\n { \"Type\": [2, 5, 4, 3], \"Value\": \"aisha\" },\n { \"Type\": [2, 5, 4, 6], \"Value\": \"PK\" },\n { \"Type\": [2, 5, 4, 7], \"Value\": \"Islamabad\" },\n { \"Type\": [2, 5, 4,10], \"Value\": \"ACME Inc.\" },\n { \"Type\": [2, 5, 4,11], \"Value\": \"Engineering\" }\n ],\n \"Organization\": [\"ACME Inc.\"],\n \"OrganizationalUnit\": [\"Engineering\"],\n \"PostalCode\": null,\n \"Province\": null,\n \"SerialNumber\": \"\",\n \"StreetAddress\": null\n }\n }\n}\n
"},{"location":"authorino/docs/features/#plain-authenticationplain","title":"Plain (authentication.plain
)","text":"Authorino can read plain identity objects, based on authentication tokens provided and verified beforehand using other means (e.g. Envoy JWT Authentication filter, Kubernetes API server authentication), and injected into the payload to the external authorization service.
The plain identity object is retrieved from the Authorization JSON based on a JSON path specified in the AuthConfig
.
This feature is particularly useful in cases where authentication/identity verification is handled before invoking the authorization service and its resolved value injected in the payload can be trusted. Examples of applications for this feature include:
- Authentication handled in Envoy leveraging the Envoy JWT Authentication filter (decoded JWT injected as 'metadata_context')
- Use of Authorino as Kubernetes ValidatingWebhook service (Kubernetes 'userInfo' injected in the body of the
AdmissionReview
request)
Example of AuthConfig
to retrieve plain identity object from the Authorization JSON.
spec:\n authentication:\n \"pre-validated-jwt\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n
If the specified JSON path does not exist in the Authorization JSON or the value is null
, the identity verification will fail and, unless other identity config succeeds, Authorino will halt the Auth Pipeline with the usual 401 Unauthorized
.
"},{"location":"authorino/docs/features/#anonymous-access-authenticationanonymous","title":"Anonymous access (authentication.anonymous
)","text":"Literally a no-op evaluator for the identity verification phase that returns a static identity object {\"anonymous\":true}
.
It allows to implement AuthConfigs
that bypasses the identity verification phase of Authorino, to such as:
- enable anonymous access to protected services (always or combined with Priorities)
- postpone authentication in the Auth Pipeline to be resolved as part of an OPA policy
Example of AuthConfig
spec that falls back to anonymous access when OIDC authentication fails, enforcing read-only access to the protected service in such cases:
spec:\n authentication:\n \"jwt\":\n jwt:\n issuerUrl: \"\u2026\"\n \"anonymous\":\n priority: 1 # expired oidc token, missing creds, etc. default to anonymous access\n anonymous: {}\n authorization:\n \"read-only-access-if-authn-fails\":\n when:\n\n - selector: auth.identity.anonymous\n operator: eq\n value: \"true\"\n patternMatching:\n patterns:\n - selector: context.request.http.method\n operator: eq\n value: GET\n
"},{"location":"authorino/docs/features/#festival-wristband-authentication","title":"Festival Wristband authentication","text":"Authorino-issued Festival Wristband tokens can be validated as any other signed JWT using Authorino's JWT verification.
The value of the issuer must be the same issuer specified in the custom resource for the protected API originally issuing wristband. Eventually, this can be the same custom resource where the wristband is configured as a valid source of identity, but not necessarily.
"},{"location":"authorino/docs/features/#extra-auth-credentials-authenticationcredentials","title":"Extra: Auth credentials (authentication.credentials
)","text":"All the identity verification methods supported by Authorino can be configured regarding the location where access tokens and credentials (i.e. authentication secrets) fly within the request.
By default, authentication secrets are expected to be supplied in the Authorization
HTTP header, with the default Bearer
prefix and the plain authentication secret separated by space.
The full list of supported options is exemplified below:
spec:\n authentication:\n \"creds-in-the-authz-header\":\n credentials:\n authorizationHeader:\n prefix: JWT\n\n \"creds-in-a-custom-header\":\n credentials:\n customHeader:\n name: X-MY-CUSTOM-HEADER\n prefix: \"\"\n\n \"creds-in-a-query-param\":\n queryString:\n name: my_param\n\n \"creds-in-a-cookie-entry\":\n cookie:\n name: cookie-key\n
"},{"location":"authorino/docs/features/#extra-identity-extension-authenticationdefaults-and-authenticationoverrides","title":"Extra: Identity extension (authentication.defaults
and authentication.overrides
)","text":"Resolved identity objects can be extended with user-defined JSON properties. Values can be static or fetched from the Authorization JSON.
A typical use-case for this feature is token normalization. Say you have more than one identity source listed in your AuthConfig
but each source issues an access token with a different JSON structure \u2013 e.g. two OIDC issuers that use different names for custom JWT claims of similar meaning; when two different identity verification/authentication methods are combined, such as API keys (whose identity objects are the corresponding Kubernetes Secret
s) and Kubernetes tokens (whose identity objects are Kubernetes UserInfo data).
In such cases, identity extension can be used to normalize the token to always include the same set of JSON properties of interest, regardless of the source of identity that issued the original token verified by Authorino. This simplifies the writing of authorization policies and configuration of dynamic responses.
In case of extending an existing property of the identity object (replacing), the API allows to control whether to overwrite the value or not. This is particularly useful for normalizing tokens of a same identity source that nonetheless may occasionally differ in structure, such as in the case of JWT claims that sometimes may not be present but can be safely replaced with another (e.g. username
or sub
).
"},{"location":"authorino/docs/features/#external-auth-metadata-features-metadata","title":"External auth metadata features (metadata
)","text":""},{"location":"authorino/docs/features/#http-getget-by-post-metadatahttp","title":"HTTP GET/GET-by-POST (metadata.http
)","text":"Generic HTTP adapter that sends a request to an external service. It can be used to fetch external metadata for the authorization policies (phase ii of the Authorino Auth Pipeline), or as a web hook.
The adapter allows issuing requests either by GET or POST methods; in both cases with URL and parameters defined by the user in the spec. Dynamic values fetched from the Authorization JSON can be used.
POST request parameters as well as the encoding of the content can be controlled using the bodyParameters
and contentType
fields of the config, respectively. The Content-Type of POST requests can be either application/x-www-form-urlencoded
(default) or application/json
.
Authentication of Authorino with the external metadata server can be set either via long-lived shared secret stored in a Kubernetes Secret or via OAuth2 client credentials grant. For long-lived shared secret, set the sharedSecretRef
field. For OAuth2 client credentials grant, use the oauth2
option.
In both cases, the location where the secret (long-lived or OAuth2 access token) travels in the request performed to the external HTTP service can be specified in the credentials
field. By default, the authentication secret is supplied in the Authorization
header with the Bearer
prefix.
Custom headers can be set with the headers
field. Nevertheless, headers such as Content-Type
and Authorization
(or eventual custom header used for carrying the authentication secret, set instead via the credentials
option) will be superseded by the respective values defined for the fields contentType
and sharedSecretRef
.
"},{"location":"authorino/docs/features/#oidc-userinfo-metadatauserinfo","title":"OIDC UserInfo (metadata.userInfo
)","text":"Online fetching of OpenID Connect (OIDC) UserInfo data (phase ii of the Authorino Auth Pipeline), associated with an OIDC identity source configured and resolved in phase (i).
Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remote checking the state of the session, as opposed to only verifying the JWT/JWS offline.
Implementation requires a JWT verification authentication config (spec.authentication.jwt
) in the same AuthConfig
, so the well-known configuration of the OpenId Connect (OIDC) issuer can be reused.
The response returned by the OIDC server to the UserInfo request is appended (as JSON) to auth.metadata
in the authorization JSON.
"},{"location":"authorino/docs/features/#user-managed-access-uma-resource-registry-metadatauma","title":"User-Managed Access (UMA) resource registry (metadata.uma
)","text":"User-Managed Access (UMA) is an OAuth-based protocol for resource owners to allow other users to access their resources. Since the UMA-compliant server is expected to know about the resources, Authorino includes a client that fetches resource data from the server and adds that as metadata of the authorization payload.
This enables the implementation of resource-level Attribute-Based Access Control (ABAC) policies. Attributes of the resource fetched in a UMA flow can be, e.g., the owner of the resource, or any business-level attributes stored in the UMA-compliant server.
A UMA-compliant server is an external authorization server (e.g., Keycloak) where the protected resources are registered. It can be as well the upstream API itself, as long as it implements the UMA protocol, with initial authentication by client_credentials
grant to exchange for a Protected API Token (PAT).
It's important to notice that Authorino does NOT manage resources in the UMA-compliant server. As shown in the flow above, Authorino's UMA client is only to fetch data about the requested resources. Authorino exchanges client credentials for a Protected API Token (PAT), then queries for resources whose URI match the path of the HTTP request (as passed to Authorino by the Envoy proxy) and fetches data of each matching resource.
The resources data is added as metadata of the authorization payload and passed as input for the configured authorization policies. All resources returned by the UMA-compliant server in the query by URI are passed along. They are available in the PDPs (authorization payload) as input.auth.metadata.custom-name => Array
. (See The \"Auth Pipeline\" for details.)
"},{"location":"authorino/docs/features/#authorization-features-authorization","title":"Authorization features (authorization
)","text":""},{"location":"authorino/docs/features/#pattern-matching-authorization-authorizationpatternmatching","title":"Pattern-matching authorization (authorization.patternMatching
)","text":"Grant/deny access based on simple pattern-matching expressions (\"patterns\") compared against values selected from the Authorization JSON.
Each expression is a tuple composed of:
- a
selector
, to fetch from the Authorization JSON \u2013 see Common feature: JSON paths for details about syntax; - an
operator
\u2013 eq
(equals), neq
(not equal); incl
(includes) and excl
(excludes), for arrays; and matches
, for regular expressions; - a fixed comparable
value
Rules can mix and combine literal expressions and references to expression sets (\"named patterns\") defined at the upper level of the AuthConfig
spec. (See Common feature: Conditions)
spec:\n authorization:\n \"my-simple-json-pattern-matching-policy\":\n patternMatching:\n patterns: # All patterns must match for access to be granted\n\n - selector: auth.identity.email_verified\n operator: eq\n value: \"true\"\n - patternRef: admin\n\n patterns:\n admin: # a named pattern that can be reused in other sets of rules or conditions\n\n - selector: auth.identity.roles\n operator: incl\n value: admin\n
"},{"location":"authorino/docs/features/#open-policy-agent-opa-rego-policies-authorizationopa","title":"Open Policy Agent (OPA) Rego policies (authorization.opa
)","text":"You can model authorization policies in Rego language and add them as part of the protection of your APIs.
Policies can be either declared in-line in Rego language (rego
) or as an HTTP endpoint where Authorino will fetch the source code of the policy in reconciliation-time (externalPolicy
).
Policies pulled from external registries can be configured to be automatically refreshed (pulled again from the external registry), by setting the authorization.opa.externalPolicy.ttl
field (given in seconds, default: 0
\u2013 i.e. auto-refresh disabled).
Authorino's built-in OPA module precompiles the policies during reconciliation of the AuthConfig and caches the precompiled policies for fast evaluation in runtime, where they receive the Authorization JSON as input.
An optional field allValues: boolean
makes the values of all rules declared in the Rego document to be returned in the OPA output after policy evaluation. When disabled (default), only the boolean value allow
is returned. Values of internal rules of the Rego document can be referenced in subsequent policies/phases of the Auth Pipeline.
"},{"location":"authorino/docs/features/#kubernetes-subjectaccessreview-authorizationkubernetessubjectaccessreview","title":"Kubernetes SubjectAccessReview (authorization.kubernetesSubjectAccessReview
)","text":"Access control enforcement based on rules defined in the Kubernetes authorization system, i.e. Role
, ClusterRole
, RoleBinding
and ClusterRoleBinding
resources of Kubernetes RBAC.
Authorino issues a SubjectAccessReview (SAR) inquiry that checks with the underlying Kubernetes server whether the user can access a particular resource, resource kind or generic URL.
It supports resource attributes authorization check (parameters defined in the AuthConfig
) and non-resource attributes authorization check (HTTP endpoint inferred from the original request).
- Resource attributes: adequate for permissions set at namespace level, defined in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb)
- Non-resource attributes: adequate for permissions set at cluster scope, defined for protected endpoints of a generic HTTP API (URL path + verb)
Example of Kubernetes role for resource attributes authorization:
apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: pet-reader\nrules:\n\n- apiGroups: [\"pets.io\"]\n resources: [\"pets\"]\n verbs: [\"get\"]\n
Example of Kubernetes cluster role for non-resource attributes authorization:
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: pet-editor\nrules:\n\n- nonResourceURLs: [\"/pets/*\"]\n verbs: [\"put\", \"delete\"]\n
Kubernetes' authorization policy configs look like the following in an Authorino AuthConfig
:
authorization:\n \"kubernetes-rbac\":\n kubernetesSubjectAccessReview:\n user: # values of the parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n selector: auth.identity.metadata.annotations.userid\n\n groups: [] # user groups to test for.\n\n # for resource attributes permission checks; omit it to perform a non-resource attributes SubjectAccessReview with path and method/verb assumed from the original request\n # if included, use the resource attributes, where the values for each parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n resourceAttributes:\n namespace:\n value: default\n group:\n value: pets.io # the api group of the protected resource to be checked for permissions for the user\n resource:\n value: pets # the resource kind\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2} # resource name \u2013 e.g., the {id} in `/pets/{id}`\n verb:\n selector: context.request.http.method.@case:lower # api operation \u2013 e.g., copying from the context to use the same http method of the request\n
user
and properties of resourceAttributes
can be defined from fixed values or patterns of the Authorization JSON.
An array of groups
(optional) can as well be set. When defined, it will be used in the SubjectAccessReview
request.
"},{"location":"authorino/docs/features/#spicedb-authorizationspicedb","title":"SpiceDB (authorization.spicedb
)","text":"Check permission requests via gRPC with an external Google Zanzibar-inspired SpiceDB server, by Authzed.
Subject, resource and permission parameters can be set to static values or read from the Authorization JSON.
spec:\n authorization:\n \"spicedb\":\n spicedb:\n endpoint: spicedb:50051\n insecure: true # disables TLS\n sharedSecretRef:\n name: spicedb\n key: token\n subject:\n kind:\n value: blog/user\n name:\n selector: auth.identity.sub\n resource:\n kind:\n value: blog/post\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2} # /posts/{id}\n permission:\n selector: context.request.http.method\n
"},{"location":"authorino/docs/features/#custom-response-features-response","title":"Custom response features (response
)","text":""},{"location":"authorino/docs/features/#custom-response-forms-successful-authorization-vs-custom-denial-status","title":"Custom response forms: successful authorization vs custom denial status","text":"The response to the external authorization request can be customized in the following fashion:
- Successful authorization (
response.success
) - Added HTTP headers (
response.success.headers
) - Envoy Dynamic Metadata (
response.success.dynamicMetadata
) - Custom denial status
- Unauthenticated (
response.unauthenticated
) - Unauthorized (
response.unauthorized
)
Successful authorization custom responses can be set based on any of the supported custom authorization methods:
- Plain text value
- JSON injection
- Festival Wristband Tokens
"},{"location":"authorino/docs/features/#added-http-headers","title":"Added HTTP headers","text":"Set custom responses as HTTP headers injected in the request post-successful authorization by specifying one of the supported methods under response.success.headers
.
The name of the response config (default) or the value of the key
option (if provided) will used as the name of the header.
"},{"location":"authorino/docs/features/#envoy-dynamic-metadata","title":"Envoy Dynamic Metadata","text":"Authorino custom response methods can also be used to propagate Envoy Dynamic Metadata. To do so, set one of the supported methods under response.success.dynamicMetadata
.
The name of the response config (default) or the value of the key
option (if provided) will used as the name of the root property of the dynamic metadata content.
A custom response exported as Envoy Dynamic Metadata can be set in the Envoy route or virtual host configuration as input to a consecutive filter in the filter chain.
E.g., to read metadata emitted by the authorization service with scheme { \"auth-data\": { \"api-key-ns\": string, \"api-key-name\": string } }
, as input in a rate limit configuration placed in the filter chain after the external authorization, the Envoy config may look like the following:
# Envoy config snippet to inject `user_namespace` and `username` rate limit descriptors from metadata emitted by Authorino\nrate_limits:\n\n- actions:\n - metadata:\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n - key: api-key-ns\n descriptor_key: user_namespace\n - metadata:\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n - key: api-key-name\n descriptor_key: username\n
"},{"location":"authorino/docs/features/#custom-denial-status-responseunauthenticated-and-responseunauthorized","title":"Custom denial status (response.unauthenticated
and response.unauthorized
)","text":"By default, Authorino will inform Envoy to respond with 401 Unauthorized
or 403 Forbidden
respectively when the identity verification (phase i of the Auth Pipeline) or authorization (phase ii) fail. These can be customized respectively by specifying spec.response.unauthanticated
and spec.response.unauthorized
in the AuthConfig
.
"},{"location":"authorino/docs/features/#custom-response-methods","title":"Custom response methods","text":""},{"location":"authorino/docs/features/#plain-text-responsesuccessheadersdynamicmetadataplain","title":"Plain text (response.success.<headers|dynamicMetadata>.plain
)","text":"Simpler, yet more generalized form, for extending the authorization response for header mutation and Envoy Dynamic Metadata, based on plain text values.
The value can be static:
response:\n success:\n headers:\n \"x-auth-service\"\n plain:\n value: Authorino\n
or fetched dynamically from the Authorization JSON (which includes support for interpolation):
response:\n success:\n headers:\n \"x-username\":\n plain:\n selector: auth.identity.username\n
"},{"location":"authorino/docs/features/#json-injection-responsesuccessheadersdynamicmetadatajson","title":"JSON injection (response.success.<headers|dynamicMetadata>.json
)","text":"User-defined dynamic JSON objects generated by Authorino in the response phase, from static or dynamic data of the auth pipeline, and passed back to the external authorization client within added HTTP headers or Dynamic Metadata.
The following Authorino AuthConfig
custom resource is an example that defines 3 dynamic JSON response items, where two items are returned to the client, stringified, in added HTTP headers, and the third as Envoy Dynamic Metadata. Envoy proxy can be configured to propagate the dynamic metadata emitted by Authorino into another filter \u2013 e.g. the rate limit filter.
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n namespace: my-namespace\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"edge\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-my-custom-header\":\n json:\n properties:\n \"prop1\":\n value: value1\n \"prop2\":\n selector: some.path.within.auth.json\n \"x-ext-auth-other-json\":\n json:\n properties:\n \"propX\":\n value: valueX\n\n dynamicMetadata:\n \"auth-data\":\n json:\n properties:\n \"api-key-ns\":\n seletor: auth.identity.metadata.namespace\n \"api-key-name\":\n selector: auth.identity.metadata.name\n
"},{"location":"authorino/docs/features/#festival-wristband-tokens-responsesuccessheadersdynamicmetadatawristband","title":"Festival Wristband tokens (response.success.<headers|dynamicMetadata>.wristband
)","text":"Festival Wristbands are signed OpenID Connect JSON Web Tokens (JWTs) issued by Authorino at the end of the auth pipeline and passed back to the client, typically in added HTTP response header. It is an opt-in feature that can be used to implement Edge Authentication Architecture (EAA) and enable token normalization. Authorino wristbands include minimal standard JWT claims such as iss
, iat
, and exp
, and optional user-defined custom claims, whose values can be static or dynamically fetched from the authorization JSON.
The Authorino AuthConfig
custom resource below sets an API protection that issues a wristband after a successful authentication via API key. Apart from standard JWT claims, the wristband contains 2 custom claims: a static value aud=internal
and a dynamic value born
that fetches from the authorization JSON the date/time of creation of the secret that represents the API key used to authenticate.
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n namespace: my-namespace\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"edge\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-wristband\":\n wristband:\n issuer: https://authorino-oidc.default.svc:8083/my-namespace/my-api-protection/x-wristband\n customClaims:\n \"aud\":\n value: internal\n \"born\":\n selector: auth.identity.metadata.creationTimestamp\n tokenDuration: 300\n signingKeyRefs:\n - name: my-signing-key\n algorithm: ES256\n - name: my-old-signing-key\n algorithm: RS256\n
The signing key names listed in signingKeyRefs
must match the names of Kubernetes Secret
resources created in the same namespace, where each secret contains a key.pem
entry that holds the value of the private key that will be used to sign the wristbands issued, formatted as PEM. The first key in this list will be used to sign the wristbands, while the others are kept to support key rotation.
For each protected API configured for the Festival Wristband issuing, Authorino exposes the following OpenID Connect Discovery well-known endpoints (available for requests within the cluster):
- OpenID Connect configuration: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-configuration
- JSON Web Key Set (JWKS) well-known endpoint: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-connect/certs
"},{"location":"authorino/docs/features/#callbacks-callbacks","title":"Callbacks (callbacks
)","text":""},{"location":"authorino/docs/features/#http-endpoints-callbackshttp","title":"HTTP endpoints (callbacks.http
)","text":"Sends requests to specified HTTP endpoints at the end of the auth pipeline.
The scheme of the http
field is the same as of metadata.http
.
Example:
spec:\n authentication: [\u2026]\n authorization: [\u2026]\n\n callbacks:\n \"log\":\n http:\n url: http://logsys\n method: POST\n body:\n selector: |\n \\{\"requestId\":context.request.http.id,\"username\":\"{auth.identity.username}\",\"authorizationResult\":{auth.authorization}\\}\n \"important-forbidden\":\n when:\n\n - selector: auth.authorization.important-policy\n operator: eq\n value: \"false\"\n http:\n url: \"http://monitoring/important?forbidden-user={auth.identity.username}\"\n
"},{"location":"authorino/docs/features/#common-feature-priorities","title":"Common feature: Priorities","text":"Priorities allow to set sequence of execution for blocks of concurrent evaluators within phases of the Auth Pipeline.
Evaluators of same priority execute concurrently to each other \"in a block\". After syncing that block (i.e. after all evaluators of the block have returned), the next block of evaluator configs of consecutive priority is triggered.
Use cases for priorities are:
- Saving expensive tasks to be triggered when there's a high chance of returning immediately after finishing executing a less expensive one \u2013 e.g.
- an identity config that calls an external IdP to verify a token that is rarely used, compared to verifying JWTs preferred by most users of the service;
- an authorization policy that performs some quick checks first, such as verifying allowed paths, and only if it passes, moves to the evaluation of a more expensive policy.
- Establishing dependencies between evaluators - e.g.
- an external metadata request that needs to wait until a previous metadata responds first (in order to use data from the response)
Priorities can be set using the priority
property available in all evaluator configs of all phases of the Auth Pipeline (identity, metadata, authorization and response). The lower the number, the highest the priority. By default, all evaluators have priority 0 (i.e. highest priority).
Consider the following example to understand how priorities work:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api\n authentication:\n \"tier-1\":\n priority: 0\n apiKey:\n selector:\n matchLabels:\n tier: \"1\"\n \"tier-2\":\n priority: 1\n apiKey:\n selector:\n matchLabels:\n tier: \"2\"\n \"tier-3\":\n priority: 1\n apiKey:\n selector:\n matchLabels:\n tier: \"3\"\n metadata:\n \"first\":\n http:\n url: http://talker-api:3000\n \"second\":\n priority: 1\n http:\n url: http://talker-api:3000/first_uuid={auth.metadata.first.uuid}\n authorization:\n \"allowed-endpoints\":\n when:\n - selector: context.request.http.path\n operator: neq\n value: /hi\n - selector: context.request.http.path\n operator: neq\n value: /hello\n - selector: context.request.http.path\n operator: neq\n value: /aloha\n - selector: context.request.http.path\n operator: neq\n value: /ciao\n patternMatching:\n patterns:\n - selector: deny\n operator: eq\n value: \"true\"\n \"more-expensive-policy\": # no point in evaluating this one if it's not an allowed endpoint\n priority: 1\n opa:\n rego: |\n allow { true }\n response:\n success:\n headers:\n \"x-auth-data\":\n json:\n properties:\n \"tier\":\n selector: auth.identity.metadata.labels.tier\n \"first-uuid\":\n selector: auth.metadata.first.uuid\n \"second-uuid\":\n selector: auth.metadata.second.uuid\n \"second-path\":\n selector: auth.metadata.second.path\n
For the AuthConfig
above,
-
Identity configs tier-2
and tier-3
(priority 1) will only trigger (concurrently) in case tier-1
(priority 0) fails to validate the authentication token first. (This behavior happens without prejudice of context canceling between concurrent evaluators \u2013 i.e. evaluators that are triggered concurrently to another, such as tier-2
and tier-3
, continue to cancel the context of each other if any of them succeeds validating the token first.)
-
Metadata source second
(priority 1) uses the response of the request issued by metadata source first
(priority 0), so it will wait for first
to finish by triggering only in the second block.
-
Authorization policy allowed-endpoints
(priority 0) is considered to be a lot less expensive than more-expensive-policy
(priority 1) and has a high chance of denying access to the protected service (if the path is not one of the allowed endpoints). By setting different priorities to these policies we ensure the more expensive policy if triggered in sequence of the less expensive one, instead of concurrently.
"},{"location":"authorino/docs/features/#common-feature-conditions-when","title":"Common feature: Conditions (when
)","text":"Conditions, named when
in the AuthConfig API, are logical expressions, composed of patterns and logical operator AND and OR, that can be used to condition the evaluation of a particular auth rule within an AuthConfig, as well as of the AuthConfig altogether (\"top-level conditions\").
The patterns are evaluated against the Authorization JSON, where each pattern is a tuple composed of:
- selector: a JSON path to fetch a value from the Authorization JSON
- operator: one of: eq (equals); neq (not equal); incl (includes) and excl (excludes), for when the value fetched from the Authorization JSON is expected to be an array; matches, for regular expressions
- value: a static string value to compare the value selected from the Authorization JSON with.
An expression contains one or more patterns and they must either all evaluate to true (\"AND\" operator, declared by grouping the patterns within an all
block) or at least one of the patterns must be true (\"OR\" operator, when grouped within an any
block.) Patterns not explicitly grouped are AND'ed by default.
To avoid repetitions when listing patterns, any set of literal { pattern, operator, value }
tuples can be stored at the top level of the AuthConfig spec, indexed by name, and later referred to within an expression by including a patternRef
in the block of conditions.
Examples of when
conditions
i) to skip an entire AuthConfig
based on the context (AND operator assumed by default):
spec:\n when: # auth enforced only on requests to POST /resources/*\n\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
ii) equivalent to the above with explicit AND operator (i.e., all
block):
spec:\n when: # auth enforced only on requests to POST /resources/*\n\n - all:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
iii) OR condition (i.e., any
block):
spec:\n when: # auth enforced only on requests with HTTP method equals to POST or PUT\n\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n
iv) complex expression with nested operations:
spec:\n when: # auth enforced only on requests to POST /resources/* or PUT /resources/*\n\n - any:\n - all:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n - all:\n - selector: context.request.http.method\n operator: eq\n value: PUT\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
v) more concise equivalent of the above (with implicit AND operator at the top level):
spec:\n when: # auth enforced only on requests to /resources/* path with method equals to POST or PUT\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n
vi) to skip part of an AuthConfig (i.e., a specific auth rule):
spec:\n metadata:\n \"metadata-source\":\n http:\n url: https://my-metadata-source.io\n when: # only fetch the external metadata if the context is HTTP method other than OPTIONS\n\n - selector: context.request.http.method\n operator: neq\n value: OPTIONS\n
vii) skipping part of an AuthConfig will not affect other auth rules:
spec:\n authentication:\n \"authn-meth-1\":\n apiKey: {\u2026} # this auth rule only triggers for POST requests to /foo[/*]\n when:\n\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/foo(/.*)?$\n\n \"authn-meth-2\": # this auth rule triggerred regardless\n jwt: {\u2026}\n
viii) concrete use-case: evaluating only the necessary identity checks based on the user's indication of the preferred authentication method (prefix of the value supplied in the HTTP Authorization
request header):
spec:\n authentication:\n \"jwt\":\n when:\n\n - selector: context.request.http.headers.authorization\n operator: matches\n value: JWT .+\n jwt: {\u2026}\n\n \"api-key\":\n when:\n\n - selector: context.request.http.headers.authorization\n operator: matches\n value: APIKEY .+\n apiKey: {\u2026}\n
ix) to avoid repetition while defining patterns for conditions:
spec:\n patterns:\n a-pet: # a named pattern that can be reused in sets of conditions\n\n - selector: context.request.http.path\n operator: matches\n value: ^/pets/\\d+(/.*)$\n\n metadata:\n \"pets-info\":\n when:\n\n - patternRef: a-pet\n http:\n url: https://pets-info.io?petId={context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}}\n\n authorization:\n \"pets-owners-only\":\n when:\n\n - patternRef: a-pet\n opa:\n rego: |\n allow { input.metadata[\"pets-info\"].ownerid == input.auth.identity.userid }\n
x) combining literals and refs \u2013 concrete case: authentication required for selected operations:
spec:\n patterns:\n api-base-path:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/api/.*\n\n authenticated-user:\n\n - selector: auth.identity.anonymous\n operator: neq\n value: \"true\"\n\n authentication:\n api-users: # tries to authenticate all requests to path /api/*\n when:\n\n - patternRef: api-base-path\n jwt: {\u2026}\n\n others: # defaults to anonymous access when authentication fails or not /api/* path\n anonymous: {}\n priority: 1\n\n authorization:\n api-write-access-requires-authentication: # POST/PUT/DELETE requests to /api/* path cannot be anonymous\n when:\n\n - all:\n - patternRef: api-base-path\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n opa:\n patternMatching:\n rules:\n - patternRef: authenticated-user\n\n response: # bonus: export user data if available\n success:\n dynamicMetadata:\n \"user-data\":\n when:\n\n - patternRef: authenticated-user\n json:\n properties:\n jwt-claims:\n selector: auth.identity\n
"},{"location":"authorino/docs/features/#common-feature-caching-cache","title":"Common feature: Caching (cache
)","text":"Objects resolved at runtime in an Auth Pipeline can be cached \"in-memory\", and avoided being evaluated again at a subsequent request, until it expires. A lookup cache key and a TTL can be set individually for any evaluator config in an AuthConfig.
Each cache config induces a completely independent cache table (or \"cache namespace\"). Consequently, different evaluator configs can use the same cache key and there will be no collision between entries from different evaluators.
E.g.:
spec:\n hosts:\n\n - my-api.io\n\n authentication: [\u2026]\n\n metadata:\n \"external-metadata\":\n http:\n url: http://my-external-source?search={context.request.http.path}\n cache:\n key:\n selector: context.request.http.path\n ttl: 300\n\n authorization:\n \"complex-policy\":\n opa:\n externalPolicy:\n url: http://my-policy-registry\n cache:\n key:\n selector: \"{auth.identity.group}-{context.request.http.method}-{context.request.http.path}\"\n ttl: 60\n
The example above sets caching for the 'external-metadata' metadata config and for the 'complex-policy' authorization policy. In the case of 'external-metadata', the cache key is the path of the original HTTP request being authorized by Authorino (fetched dynamically from the Authorization JSON); i.e., after obtaining a metadata object from the external source for a given contextual HTTP path for the first time, whenever that same HTTP path repeats in a subsequent request, Authorino will use the cached object instead of sending a new request to the external source of metadata. After 5 minutes (300 seconds), the cache entry will expire and Authorino will fetch from the source again if requested.
As for the 'complex-policy' authorization policy, the cache key is a string composed of the 'group' the identity belongs to, the method of the HTTP request and the path of the HTTP request. Whenever these repeat, Authorino will reuse the previously evaluated and cached result of the policy. Cache entries in this namespace expire after 60 seconds.
Notes on evaluator caching
Capacity - By default, each cache namespace is limited to 1 MB. Entries will be evicted following a First-In-First-Out (FIFO) policy to release space. The individual capacity of cache namespaces is set at the level of the Authorino instance (via --evaluator-cache-size
command-line flag or spec.evaluatorCacheSize
field of the Authorino
CR).
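For instance, a minimal sketch of tuning this capacity via the Authorino CR (the value below is illustrative and assumed to be in megabytes, consistent with the 1 MB default mentioned above):

apiVersion: operator.authorino.kuadrant.io/v1beta1
kind: Authorino
metadata:
  name: authorino
spec:
  evaluatorCacheSize: 2 # assumption: megabytes per cache namespace
  listener:
    tls:
      enabled: false
  oidcServer:
    tls:
      enabled: false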
Usage - Avoid caching objects whose evaluation is considered to be relatively cheap. Examples of operations associated with Authorino auth features that are usually NOT worth caching: validation of JSON Web Tokens (JWT), Kubernetes TokenReviews and SubjectAccessReviews, API key validation, simple JSON pattern-matching authorization rules, simple OPA policies. Examples of operations where caching may be desired: OAuth2 token introspection, fetching of metadata from external sources (via HTTP request), complex OPA policies.
"},{"location":"authorino/docs/features/#common-feature-metrics-metrics","title":"Common feature: Metrics (metrics
)","text":"By default, Authorino will only export metrics down to the level of the AuthConfig. Deeper metrics at the level of each evaluator within an AuthConfig can be activated by setting the common field metrics: true
of the evaluator config.
E.g.:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-authconfig\n namespace: my-ns\nspec:\n metadata:\n \"my-external-metadata\":\n http:\n url: http://my-external-source?search={context.request.http.path}\n metrics: true\n
The above will enable the metrics auth_server_evaluator_duration_seconds
(histogram) and auth_server_evaluator_total
(counter) with labels namespace=\"my-ns\"
, authconfig=\"my-authconfig\"
, evaluator_type=\"METADATA_GENERIC_HTTP\"
and evaluator_name=\"my-external-metadata\"
.
The same pattern works for other types of evaluators. Find below the list of all types and the corresponding label constants used in the metrics:

Evaluator type                                  Metric's evaluator_type label
authentication.apiKey                           IDENTITY_APIKEY
authentication.kubernetesTokenReview            IDENTITY_KUBERNETES
authentication.jwt                              IDENTITY_OIDC
authentication.oauth2Introspection              IDENTITY_OAUTH2
authentication.x509                             IDENTITY_MTLS
authentication.plain                            IDENTITY_PLAIN
authentication.anonymous                        IDENTITY_NOOP
metadata.http                                   METADATA_GENERIC_HTTP
metadata.userInfo                               METADATA_USERINFO
metadata.uma                                    METADATA_UMA
authorization.patternMatching                   AUTHORIZATION_JSON
authorization.opa                               AUTHORIZATION_OPA
authorization.kubernetesSubjectAccessReview     AUTHORIZATION_KUBERNETES
authorization.spicedb                           AUTHORIZATION_AUTHZED
response.success..plain                         RESPONSE_PLAIN
response.success..json                          RESPONSE_JSON
response.success..wristband                     RESPONSE_WRISTBAND

Metrics at the level of the evaluators can also be enforced for an entire Authorino instance, by setting the --deep-metrics-enabled
command-line flag. In this case, regardless of the value of the field spec.(authentication|metadata|authorization|response).metrics
in the AuthConfigs, individual metrics for all evaluators of all AuthConfigs will be exported.
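To spot-check these metrics, one option is to query Authorino's metrics endpoint directly, as in the sketch below (assuming the default deployment name and metrics port 8080; the /server-metrics path and port may differ in your setup):

kubectl port-forward deployment/authorino 8080:8080 2>&1 >/dev/null &
curl -s http://localhost:8080/server-metrics | grep auth_server_evaluator
# illustrative output, with the labels described above:
# auth_server_evaluator_total{authconfig="my-authconfig",evaluator_name="my-external-metadata",evaluator_type="METADATA_GENERIC_HTTP",namespace="my-ns"} 1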
For more information about metrics exported by Authorino, see Observability.
"},{"location":"authorino/docs/getting-started/","title":"Getting started","text":"This page covers requirements and instructions to deploy Authorino on a Kubernetes cluster, as well as the steps to declare, apply and try out a protection layer of authentication and authorization over your service, clean-up and complete uninstallation.
If you prefer learning with an example, check out our Hello World.
"},{"location":"authorino/docs/getting-started/#requirements","title":"Requirements","text":""},{"location":"authorino/docs/getting-started/#platform-requirements","title":"Platform requirements","text":"These are the platform requirements to use Authorino:
-
Kubernetes server (recommended v1.21 or later), with permission to create Kubernetes Custom Resource Definitions (CRDs) (for bootstrapping Authorino and Authorino Operator)
Alternative: K8s distros and platforms
As an alternative to upstream Kubernetes, you should be able to use any other Kubernetes distribution or Kubernetes Management Platform (KMP) with support for Kubernetes Custom Resource Definitions (CRD) and custom controllers, such as Red Hat OpenShift, IBM Cloud Kubernetes Service (IKS), Google Kubernetes Engine (GKE), Amazon Elastic Kubernetes Service (EKS) and Azure Kubernetes Service (AKS).
-
Envoy proxy (recommended v1.19 or later), to wire up Upstream services (i.e. the services to be protected with Authorino) and external authorization filter (Authorino) for integrations based on the reverse-proxy architecture - example
Alternative: Non-reverse-proxy integration
Technically, any client that implements Envoy's external authorization gRPC protocol should be compatible with Authorino. Nevertheless, for integrations based on the reverse-proxy architecture, we strongly recommend that you leverage Envoy alongside Authorino.
"},{"location":"authorino/docs/getting-started/#feature-specific-requirements","title":"Feature-specific requirements","text":"A few examples are:
-
For OpenID Connect, make sure you have access to an identity provider (IdP) and an authority that can issue ID tokens (JWTs). Check out Keycloak which can solve both and connect to external identity sources and user federation like LDAP.
-
For Kubernetes authentication tokens, platform support for the TokenReview and SubjectAccessReview APIs of Kubernetes is required. In case you want to be able to request access tokens for clients running outside the cluster, you may also want to check out the requisites for using the Kubernetes TokenRequest API (GA in v1.20).
-
For User-Managed Access (UMA) resource data, you will need a UMA-compliant server running as well. This can be an implementation of the UMA protocol by each upstream API itself or (more typically) an external server that knows about the resources. Again, Keycloak can be a good fit here as well. Just keep in mind that, whatever resource server you choose, state-changing actions performed in the upstream APIs or by other parties will have to be reflected in the resource server. Authorino will not do that for you.
Check out the Feature specification page for more feature-specific requirements.
"},{"location":"authorino/docs/getting-started/#installation","title":"Installation","text":""},{"location":"authorino/docs/getting-started/#step-install-the-authorino-operator","title":"Step: Install the Authorino Operator","text":"The simplest way to install the Authorino Operator is by applying the manifest bundle:
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
The above will install the latest build of the Authorino Operator and the latest version of the manifests (CRDs and RBAC), which by default also point to the latest build of Authorino, all based on the main
branches of each component. To install a stable released version of the Operator, which in turn defaults to its latest compatible stable release of Authorino, replace main
with the tag of a proper release of the Operator, e.g. 'v0.2.0'.
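For example, to install the 'v0.2.0' release of the Operator, replace main in the URL accordingly:

curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/v0.2.0/utils/install.sh | bash -s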
This step will also install cert-manager in the cluster (required).
Alternatively, you can deploy the Authorino Operator using the Operator Lifecycle Manager bundles. For instructions, check out Installing via OLM.
"},{"location":"authorino/docs/getting-started/#step-request-an-authorino-instance","title":"Step: Request an Authorino instance","text":"Choose either cluster-wide or namespaced deployment mode and whether you want TLS termination enabled for the Authorino endpoints (gRPC authorization, raw HTTP authorization, and OIDC Festival Wristband Discovery listeners), and follow the corresponding instructions below.
The instructions here are for centralized gateway or centralized authorization service architecture. Check out the Topologies section of the docs for the alternative of running Authorino as a sidecar container.
Cluster-wide (with TLS) Create the namespace:
kubectl create namespace authorino\n
Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secret
s in the namespace):
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n
Deploy Authorino:
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n replicas: 1\n clusterWide: true\n listener:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
Cluster-wide (without TLS) kubectl create namespace authorino\nkubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: true\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
Namespaced (with TLS) Create the namespace:
kubectl create namespace myapp\n
Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secret
s in the namespace):
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/myapp/g\" | kubectl -n myapp apply -f -\n
Deploy Authorino:
kubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: false\n listener:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
Namespaced (without TLS) kubectl create namespace myapp\nkubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: false\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/getting-started/#protect-a-service","title":"Protect a service","text":"The most typical integration to protect services with Authorino is by putting the service (upstream) behind a reverse-proxy or API gateway, enabled with an authorization filter that ensures all requests to the service are first checked with the authorization server (Authorino).
To do that, make sure you have your upstream service deployed and running, usually in the same Kubernetes server where you installed Authorino. Then, set up an Envoy proxy and create an Authorino AuthConfig
for your service.
Authorino exposes 2 interfaces to serve the authorization requests:
- a gRPC interface that implements Envoy's External Authorization protocol;
- a raw HTTP authorization interface, suitable for using Authorino with Kubernetes ValidatingWebhook, for Envoy external authorization via HTTP, and other integrations (e.g. other proxies).
To use Authorino as a simple satellite (sidecar) Policy Decision Point (PDP), applications can integrate directly via any of these interfaces. By integrating via a proxy or API gateway, the combination makes Authorino perform as an external Policy Enforcement Point (PEP) completely decoupled from the application.
"},{"location":"authorino/docs/getting-started/#life-cycle","title":"Life cycle","text":""},{"location":"authorino/docs/getting-started/#step-setup-envoy","title":"Step: Setup Envoy","text":"To configure Envoy for proxying requests targeting the upstream service and authorizing with Authorino, setup an Envoy configuration that enables Envoy's external authorization HTTP filter. Store the configuration in a ConfigMap
.
These are the important bits in the Envoy configuration to activate Authorino:
static_resources:\n listeners:\n\n - address: {\u2026} # TCP socket address and port of the proxy\n filter_chains:\n - filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n route_config: {\u2026} # routing configs - virtual host domain and endpoint matching patterns and corresponding upstream services to redirect the traffic\n http_filters:\n - name: envoy.filters.http.ext_authz # the external authorization filter\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false # ensures only authenticated and authorized traffic goes through\n grpc_service:\n envoy_grpc:\n cluster_name: authorino\n timeout: 1s\n clusters:\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization # name of the Authorino service deployed \u2013 it can be the fully qualified name with `.<namespace>.svc.cluster.local` suffix (e.g. `authorino-authorino-authorization.myapp.svc.cluster.local`)\n port_value: 50051\n transport_socket: # in case TLS termination is enabled in Authorino; omit it otherwise\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n common_tls_context:\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/authorino-ca-cert.crt\n
For a complete Envoy ConfigMap
containing an upstream API protected with Authorino, with TLS enabled and an option for rate limiting with Limitador, plus a webapp served under the same domain as the protected API, check out this example.
After creating the ConfigMap
with the Envoy configuration, create an Envoy Deployment
and Service
. E.g.:
kubectl -n myapp apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: envoy\n labels:\n app: envoy\nspec:\n selector:\n matchLabels:\n app: envoy\n template:\n metadata:\n labels:\n app: envoy\n spec:\n containers:\n\n - name: envoy\n image: envoyproxy/envoy:v1.19-latest\n command: [\"/usr/local/bin/envoy\"]\n args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n ports:\n - name: web\n containerPort: 8000 # matches the address of the listener in the envoy config\n volumeMounts:\n - name: config\n mountPath: /usr/local/etc/envoy\n readOnly: true\n - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n subPath: ca.crt\n mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n readOnly: true\n volumes:\n - name: config\n configMap:\n name: envoy\n items:\n - key: envoy.yaml\n path: envoy.yaml\n - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n secret:\n defaultMode: 420\n secretName: authorino-ca-cert\n replicas: 1\nEOF\n
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Service\nmetadata:\n name: envoy\nspec:\n selector:\n app: envoy\n ports:\n\n - name: web\n port: 8000\n protocol: TCP\nEOF\n
"},{"location":"authorino/docs/getting-started/#step-apply-an-authconfig","title":"Step: Apply an AuthConfig
","text":"Check out the docs for a full description of Authorino's AuthConfig
Custom Resource Definition (CRD) and its features.
For examples based on specific use-cases, check out the User guides.
For authentication based on OpenID Connect (OIDC) JSON Web Tokens (JWT), plus one simple JWT claim authorization check, a typical AuthConfig
custom resource looks like the following:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts: # any hosts that resolve to the envoy service and envoy routing config where the external authorization filter is enabled\n\n - my-api.io # north-south traffic through a Kubernetes `Ingress` or OpenShift `Route`\n - my-api.myapp.svc.cluster.local # east-west traffic (between applications within the cluster)\n authentication:\n \"idp-users\":\n jwt:\n issuerUrl: https://my-idp.com/auth/realm\n authorization:\n \"check-claim\":\n patternMatching:\n patterns:\n - selector: auth.identity.group\n operator: eq\n value: allowed-users\nEOF\n
After applying the AuthConfig
, consumers of the protected service should be able to start sending requests.
"},{"location":"authorino/docs/getting-started/#clean-up","title":"Clean-up","text":""},{"location":"authorino/docs/getting-started/#remove-protection","title":"Remove protection","text":"Delete the AuthConfig
:
kubectl -n myapp delete authconfig/my-api-protection\n
Decommission the Authorino instance:
kubectl -n myapp delete authorino/authorino\n
"},{"location":"authorino/docs/getting-started/#uninstall","title":"Uninstall","text":"To completely remove Authorino CRDs, run from the Authorino Operator directory:
make uninstall\n
"},{"location":"authorino/docs/getting-started/#next-steps","title":"Next steps","text":" - Read the docs. The Architecture page and the Features page are good starting points to learn more about how Authorino works and its functionalities.
- Check out the User guides for several examples of
AuthConfig
s based on specific use-cases
"},{"location":"authorino/docs/terminology/","title":"Terminology","text":"Here we define some terms that are used in the project, with the goal of avoiding confusion and facilitating more accurate conversations related to Authorino
.
If you see terms used that are not here (or are used in place of terms here) please consider contributing a definition to this doc with a PR, or modifying the use elsewhere to align with these terms.
"},{"location":"authorino/docs/terminology/#terms","title":"Terms","text":"Access token Type of temporary password (security token), tied to an authenticated identity, issued by an auth server as of request from either the identity subject itself or a registered auth client known by the auth server, and that delegates to a party powers to operate on behalf of that identity before a resource server; it can be formatted as an opaque data string or as an encoded JSON Web Token (JWT).
Application Programming Interface (API) Interface that defines interactions between multiple software applications; (in HTTP communication) set of endpoints and specification to expose resources hosted by a resource server, to be consumed by client applications; the access facade of a resource server.
Attribute-based Access Control (ABAC) Authorization model that grants/denies access to resources based on evaluation of authorization policies which combine attributes together (from claims, from the request, from the resource, etc).
Auth Usually employed as a short for authentication and authorization together (AuthN/AuthZ).
Auth client Application client (software) that uses an auth server, either in the process of authenticating and/or authorizing identity subjects (including self) who want to consume resources from a resources server or auth server.
Auth server Server where auth clients, users, roles, scopes, resources, policies and permissions can be stored and managed.
Authentication (AuthN) Process of verifying that a given credential belongs to a claimed-to-be identity; usually resulting in the issuing of an access token.
Authorization (AuthZ) Process of granting (or denying) access over a resource to a party based on the set of authorization rules, policies and/or permissions enforced.
Authorization header HTTP request header frequently used to carry credentials to authenticate a user in an HTTP communication, like in requests sent to an API; alternatives usually include credentials carried in another (custom) HTTP header, query string parameter or HTTP cookie.
Capability Usually employed to refer to a management feature of a Kubernetes-native system, based on the definition and use of Kubernetes Custom Resources (CRDs and CRs), that enables that system to reach one of the following \u201ccapability levels\u201d: Basic Install, Seamless Upgrades, Full Lifecycle, Deep Insights, Auto Pilot.
Claim Attribute packed in a security token which represents a claim that one who bears the token is making about an entity, usually an identity subject.
Client ID Unique identifier of an auth client within an auth server domain (or auth server realm).
Client secret Password presented by auth clients together with their Client IDs while authenticating with an auth server, either when requesting access tokens to be issued or when consuming services from the auth servers in general.
Delegation Process of granting a party (usually an auth client) powers to act, often with limited scope, on behalf of an identity, to access resources from a resource server. See also OAuth2.
Hash-based Message Authentication Code (HMAC) Specific type of message authentication code (MAC) that involves a cryptographic hash function and a shared secret cryptographic key; it can be used to verify the authenticity of a message and therefore as an authentication method.
Identity Set of properties that qualifies a subject as a strong identifiable entity (usually a user), who can be authenticated by an auth server. See also Claims.
Identity and Access Management (IAM) system Auth system that implements and/or connects with sources of identity (IdP) and offers interfaces for managing access (authorization policies and permissions). See also Auth server.
Identity Provider (IdP) Source of identity; it can be a feature of an auth server or external source connected to an auth server.
ID token Special type of access token; an encoded JSON Web Token (JWT) that packs claims about an identity.
JSON Web Token (JWT) JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties.
JSON Web Signature (JWS) Standard for signing arbitrary data, especially JSON Web Tokens (JWT).
JSON Web Key Set (JWKS) Set of keys containing the public keys used to verify any JSON Web Token (JWT).
Keycloak Open source auth server to allow single sign-on with identity and access management.
Lightweight Directory Access Protocol (LDAP) Open standard for distributed directory information services for sharing of information about users, systems, networks, services and applications.
Mutual Transport Layer Security (mTLS) Protocol for the mutual authentication of client-server communication, i.e., the client authenticates the server and the server authenticates the client, based on the acceptance of the X.509 certificates of each party.
OAuth 2.0 (OAuth2) Industry-standard protocol for delegation.
OpenID Connect (OIDC) Simple identity verification (authentication) layer built on top of the OAuth2 protocol.
Open Policy Agent (OPA) Authorization policy agent that enables the usage of declarative authorization policies written in Rego language.
Opaque token Security token devoid of explicit meaning (e.g. a random string); it requires the use of a lookup mechanism to be translated into a meaningful set of claims representing an identity.
Permission Association between a protected resource and the authorization policies that must be evaluated to determine whether access should be granted; e.g. <user|group|role>
CAN DO <action>
ON RESOURCE <X>
.
Policy Rule or condition (authorization policy) that must be satisfied to grant access to a resource; strongly related to the different access control mechanisms (ACMs) and strategies one can use to protect resources, e.g. attribute-based access control (ABAC), role-based access control (RBAC), context-based access control, user-based access control (UBAC).
Policy Administration Point (PAP) Set of UIs and APIs to manage resources servers, resources, scopes, policies and permissions; it is where the auth system is configured.
Policy Decision Point (PDP) Where the authorization requests are sent, with permissions being requested, and authorization policies are evaluated accordingly.
Policy Enforcement Point (PEP) Where the authorization is effectively enforced, usually at the resource server or at a proxy, based on a response provided by the Policy Decision Point (PDP).
Policy storage Where policies are stored and from where they can be fetched, perhaps to be cached.
Red Hat SSO Auth server; downstream product created from the Keycloak Open Source project.
Refresh token Special type of security token, often provided together with an access token in an OAuth2 flow, used to renew the duration of an access token before it expires; it requires client authentication.
Request Party Token (RPT) JSON Web Token (JWT) digitally signed using JSON Web Signature (JWS), issued by the Keycloak auth server.
Resource One or more endpoints of a system, API or server, that can be protected.
Resource-level Access Control (RLAC) Authorization model that takes into consideration attributes of each specific request resource to grant/deny access to those resources (e.g. the resource's owner).
Resource server Server that hosts protected resources.
Role Aspect of a user\u2019s identity assigned to the user to indicate the level of access they should have to the system; essentially, roles represent collections of permissions.
Role-based Access Control (RBAC) Authorization model that grants/denies access to resources based on the roles of authenticated users (rather than on complex attributes/policy rules).
Scope Mechanism that defines the specific operations that applications can be allowed to do or information that they can request on an identity\u2019s behalf; often presented as a parameter when access is requested as a way to communicate what access is needed, and used by auth server to respond what actual access is granted.
Single Page Application (SPA) Web application or website that interacts with the user by dynamically rewriting the current web page with new data from the web server.
Single Sign-on (SSO) Authentication scheme that allows a user to log in with a single ID and password to any of several related, yet independent, software systems.
Upstream (In the context of authentication/authorization) API whose endpoints must be protected by the auth system; the unprotected service in front of which a protection layer is added (by connecting with a Policy Decision Point).
User-based Access Control (UBAC) Authorization model that grants/denies access to resources based on claims of the identity (attributes of the user).
User-Managed Access (UMA) OAuth2-based access management protocol, used for users of an auth server to control the authorization process, i.e. directly granting/denying access to user-owned resources to other requesting parties.
"},{"location":"authorino/docs/user-guides/","title":"User guides","text":" -
Hello World The basics of protecting an API with Authorino.
-
Authentication with Kubernetes tokens (TokenReview API) Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.
-
Authentication with API keys Issue API keys stored in Kubernetes Secret
s for clients to authenticate with your protected hosts.
-
Authentication with X.509 certificates and mTLS Verify client X.509 certificates against trusted root CAs.
-
OpenID Connect Discovery and authentication with JWTs Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).
-
OAuth 2.0 token introspection (RFC 7662) Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation at request time.
-
Passing credentials (Authorization
header, cookie headers and others) Customize where credentials are supplied in the request by each trusted source of identity.
-
HTTP \"Basic\" Authentication (RFC 7235) Turn Authorino API key Secret
s settings into HTTP basic auth.
-
Anonymous access Bypass identity verification or fall back to anonymous access when credentials fail to validate
-
Token normalization Normalize identity claims from trusted sources and reduce complexity in your policies.
-
Edge Authentication Architecture (EAA) Exchange satellite (outer-layer) authentication tokens for \"Festival Wristbands\" accepted ubiquitously inside your network. Normalize multiple and varied sources of identity and authentication methods at the edge of your architecture; filter out private data, limit the scope of permissions, and simplify authorization rules for your internal microservices.
-
Fetching auth metadata from external sources Get online data from remote HTTP services to enhance authorization rules.
-
OpenID Connect UserInfo Fetch user info for OpenID Connect ID tokens at request time, for extra metadata for your policies and online verification of token validity.
-
Resource-level authorization with User-Managed Access (UMA) resource registry Fetch resource attributes relevant for authorization from a User-Managed Access (UMA) resource registry such as Keycloak resource server clients.
-
Simple pattern-matching authorization policies Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.
-
OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.
-
Open Policy Agent (OPA) Rego policies Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.
-
Kubernetes RBAC for service authorization (SubjectAccessReview API) Manage permissions in the Kubernetes RBAC and let Authorino check them at request time with the authorization system of the cluster.
-
Authorization with Keycloak Authorization Services Use Authorino as an adapter for Keycloak Authorization Services without importing any library or rebuilding your application code.
-
Integration with Authzed/SpiceDB Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.
-
Injecting data in the request Inject HTTP headers with serialized JSON content.
-
Authenticated rate limiting (with Envoy Dynamic Metadata) Provide Envoy with dynamic metadata from the external authorization process to be injected and used by consecutive filters, such as a rate limiting service.
-
Redirecting to a login page Customize response status code and headers on failed requests. E.g. redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized
; mask resources on access denied behind a 404 Not Found
response instead of 403 Forbidden
.
-
Mixing Envoy built-in filter for auth and Authorino Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in an AuthConfig.
-
Host override via context extension Induce the lookup of an AuthConfig by supplying extended host context, for use cases such as path prefix-based lookup and wildcard subdomain lookup.
-
Using Authorino as ValidatingWebhook service Use Authorino as a generic Kubernetes ValidatingWebhook service where the rules to validate a request to the Kubernetes API are written in an AuthConfig.
-
Reducing the operational space: sharding, noise and multi-tenancy Have multiple instances of Authorino running in the same space (Kubernetes namespace or cluster-scoped), yet watching particular sets of resources.
-
Caching Cache auth objects resolved at runtime for any configuration bit of an AuthConfig, for easy access in subsequent requests whenever an arbitrary cache key repeats, until the cache entry expires.
-
Observability Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.
"},{"location":"authorino/docs/user-guides/anonymous-access/","title":"User guide: Anonymous access","text":"Bypass identity verification or fall back to anonymous access when credentials fail to validate
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Anonymous access
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/anonymous-access/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/anonymous-access/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"public\":\n anonymous: {}\nEOF\n
The example above enables anonymous access (i.e. removes authentication), without adding any extra layer of protection to the API. This is virtually equivalent to setting a top-level condition to the AuthConfig
that always skips the configuration, or to switching authentication/authorization off completely in the route to the API.
For more sophisticated use cases of anonymous access with Authorino, consider combining this feature with other identity sources in the AuthConfig
while playing with the priorities of each source, as well as combination with when
conditions, and/or adding authorization policies that either cover authentication or address anonymous access with proper rules (e.g. enforcing read-only access).
Check out the docs for the Anonymous access feature for an example of an AuthConfig
that falls back to anonymous access when a priority OIDC/JWT-based authentication fails, and enforces a read-only policy in such cases.
"},{"location":"authorino/docs/user-guides/anonymous-access/#consume-the-api","title":"\u277b Consume the API","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/api-key-authentication/","title":"User guide: Authentication with API keys","text":"Issue API keys stored in Kubernetes Secret
s for clients to authenticate with your protected hosts.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 API key
In Authorino, API keys are stored as Kubernetes Secret
s. Each resource must contain an api_key
entry with the value of the API key, and labeled to match the selectors specified in spec.identity.apiKey.selector
of the AuthConfig
.
API key Secret
s must also include labels that match the secretLabelSelector
field of the Authorino instance. See Resource reconciliation and status update for details.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/api-key-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/api-key-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a valid API key:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
With missing or invalid API key:
curl -H 'Authorization: APIKEY invalid' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"friends\"\n# x-ext-auth-reason: the API Key provided is invalid\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#delete-an-api-key-revoke-access-to-the-api","title":"\u277d Delete an API key (revoke access to the API)","text":"kubectl delete secret/api-key-1\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/","title":"User guide: Authenticated rate limiting (with Envoy Dynamic Metadata)","text":"Provide Envoy with dynamic metadata about the external authorization process to be injected into the rate limiting filter.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Response wrappers \u2192 Envoy Dynamic Metadata
- Dynamic response \u2192 JSON injection
- Identity verification & authentication \u2192 API key
Dynamic JSON objects built out of static values and values fetched from the Authorization JSON can be wrapped to be returned to the reverse-proxy as Envoy Well Known Dynamic Metadata content. Envoy can use those to inject data returned by the external authorization service into the other filters, such as the rate limiting filter.
Check out as well the user guides about Injecting data in the request and Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-limitador","title":"\u2778 Deploy Limitador","text":"Limitador is a lightweight rate limiting service that can be used with Envoy.
In this bundle, we will deploy Limitador pre-configured to limit requests to the talker-api
domain to 5 requests per 60-second interval, per user_id
. Envoy will be configured to recognize the presence of Limitador and to activate it on requests to the Talker API.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
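For reference, the limit enforced by this bundle corresponds to a Limitador limits entry along the lines of the sketch below. This is illustrative, not the verbatim content of the bundle; check the ConfigMap in the deployed manifest for the exact definition:
- namespace: talker-api\n  max_value: 5\n  seconds: 60\n  conditions: []\n  variables:\n    - user_id\n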
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-the-talker-api","title":"\u2779 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#setup-envoy","title":"\u277a Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-an-authconfig","title":"\u277b Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
An annotation auth-data/username
will be read from the Kubernetes API key Secret and passed as dynamic metadata { \"ext_auth_data\": { \"username\": \u00abannotations.auth-data/username\u00bb } }
.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"rate-limit\":\n json:\n properties:\n \"username\":\n selector: auth.identity.metadata.annotations.auth-data\\/username\n key: ext_auth_data # how this bit of dynamic metadata from the ext authz service is named in the Envoy config\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
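For context, on the Envoy side this dynamic metadata is typically turned into a rate limit descriptor via a metadata action in the route configuration. The snippet below is an illustrative sketch based on Envoy's envoy.config.route.v3.RateLimit API, not the verbatim configuration of the bundle deployed earlier:
rate_limits:\n- actions:\n  - metadata:\n      descriptor_key: user_id\n      metadata_key:\n        key: ext_auth_data\n        path:\n          - key: username\n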
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-the-api-keys","title":"\u277c Create the API keys","text":"For user John:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/username: john\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
For user Jane:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/username: jane\nstringData:\n api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#consume-the-api","title":"\u277d Consume the API","text":"As John:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Repeat the request a few more times within the 60-second time window, until the response status is 429 Too Many Requests
.
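If you prefer, a small loop like the one below (illustrative; assumes a fresh 60-second window) prints only the status codes, so you can watch them flip from 200 to 429:
for i in {1..6}; do curl -s -o /dev/null -w '%{http_code}\\n' -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello; done\n# 200\n# 200\n# 200\n# 200\n# 200\n# 429\n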
While the API is still limited to John, send requests as Jane:
curl -H 'Authorization: APIKEY 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/authzed/","title":"User guide: Integration with Authzed/SpiceDB","text":"Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.
Authorino capabilities featured in this guide: - Authorization \u2192 SpiceDB
- Identity verification & authentication \u2192 API key
"},{"location":"authorino/docs/user-guides/authzed/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/authzed/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/authzed/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/authzed/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/authzed/#create-the-permission-database","title":"\u277a Create the permission database","text":"Create the namespace:
kubectl create namespace spicedb\n
Create the SpiceDB instance:
kubectl -n spicedb apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: spicedb\n labels:\n app: spicedb\nspec:\n selector:\n matchLabels:\n app: spicedb\n template:\n metadata:\n labels:\n app: spicedb\n spec:\n containers:\n\n - name: spicedb\n image: authzed/spicedb\n args:\n - serve\n - \"--grpc-preshared-key\"\n - secret\n - \"--http-enabled\"\n ports:\n - containerPort: 50051\n - containerPort: 8443\n replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: spicedb\nspec:\n selector:\n app: spicedb\n ports:\n - name: grpc\n port: 50051\n protocol: TCP\n - name: http\n port: 8443\n protocol: TCP\nEOF\n
Forward local request to the SpiceDB service inside the cluster:
kubectl -n spicedb port-forward service/spicedb 8443:8443 2>&1 >/dev/null &\n
Create the permission schema:
curl -X POST http://localhost:8443/v1/schema/write \\\n -H 'Authorization: Bearer secret' \\\n -H 'Content-Type: application/json' \\\n -d @- << EOF\n{\n \"schema\": \"definition blog/user {}\\ndefinition blog/post {\\n\\trelation reader: blog/user\\n\\trelation writer: blog/user\\n\\n\\tpermission read = reader + writer\\n\\tpermission write = writer\\n}\"\n}\nEOF\n
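To double-check that the schema was stored, you can read it back from SpiceDB's HTTP API (an illustrative sketch):
curl -X POST http://localhost:8443/v1/schema/read \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d '{}'\n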
Create the relationships:
blog/user:emilia
\u2192 writer
of blog/post:1
blog/user:beatrice
\u2192 reader
of blog/post:1
curl -X POST http://localhost:8443/v1/relationships/write \\\n -H 'Authorization: Bearer secret' \\\n -H 'Content-Type: application/json' \\\n -d @- << EOF\n{\n \"updates\": [\n {\n \"operation\": \"OPERATION_CREATE\",\n \"relationship\": {\n \"resource\": {\n \"objectType\": \"blog/post\",\n \"objectId\": \"1\"\n },\n \"relation\": \"writer\",\n \"subject\": {\n \"object\": {\n \"objectType\": \"blog/user\",\n \"objectId\": \"emilia\"\n }\n }\n }\n },\n {\n \"operation\": \"OPERATION_CREATE\",\n \"relationship\": {\n \"resource\": {\n \"objectType\": \"blog/post\",\n \"objectId\": \"1\"\n },\n \"relation\": \"reader\",\n \"subject\": {\n \"object\": {\n \"objectType\": \"blog/user\",\n \"objectId\": \"beatrice\"\n }\n }\n }\n }\n ]\n}\nEOF\n
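Before wiring Authorino up, you can verify the relationships directly against SpiceDB with a permission check. The request below is an illustrative sketch of the v1 HTTP API; for this check, the expected answer is PERMISSIONSHIP_HAS_PERMISSION:
curl -X POST http://localhost:8443/v1/permissions/check \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d @- << EOF\n{\n  \"consistency\": { \"fullyConsistent\": true },\n  \"resource\": { \"objectType\": \"blog/post\", \"objectId\": \"1\" },\n  \"permission\": \"read\",\n  \"subject\": { \"object\": { \"objectType\": \"blog/user\", \"objectId\": \"beatrice\" } }\n}\nEOF\n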
"},{"location":"authorino/docs/user-guides/authzed/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Store the shared token that Authorino will use to authenticate with the SpiceDB instance in a Kubernetes Secret:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: spicedb\n labels:\n app: spicedb\nstringData:\n grpc-preshared-key: secret\nEOF\n
Create the AuthConfig:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"blog-users\":\n apiKey:\n selector:\n matchLabels:\n app: talker-api\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"authzed-spicedb\":\n spicedb:\n endpoint: spicedb.spicedb.svc.cluster.local:50051\n insecure: true\n sharedSecretRef:\n name: spicedb\n key: grpc-preshared-key\n subject:\n kind:\n value: blog/user\n name:\n selector: auth.identity.metadata.annotations.username\n resource:\n kind:\n value: blog/post\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}\n permission:\n selector: context.request.http.method.@replace:{\"old\":\"GET\",\"new\":\"read\"}.@replace:{\"old\":\"POST\",\"new\":\"write\"}\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#create-the-api-keys","title":"\u277c Create the API keys","text":"For Emilia (writer):
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-writer\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: talker-api\n annotations:\n username: emilia\nstringData:\n api_key: IAMEMILIA\nEOF\n
For Beatrice (reader):
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-reader\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: talker-api\n annotations:\n username: beatrice\nstringData:\n api_key: IAMBEATRICE\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#consume-the-api","title":"\u277d Consume the API","text":"As Emilia, send a GET request:
curl -H 'Authorization: APIKEY IAMEMILIA' \\\n -X GET \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Emilia, send a POST request:
curl -H 'Authorization: APIKEY IAMEMILIA' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Beatrice, send a GET request:
curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n -X GET \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Beatrice, send a POST request:
curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: PERMISSIONSHIP_NO_PERMISSION;token=GhUKEzE2NzU3MDE3MjAwMDAwMDAwMDA=\n
"},{"location":"authorino/docs/user-guides/authzed/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-writer\nkubectl delete secret/api-key-reader\nkubectl delete secret/spicedb\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace spicedb\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/caching/","title":"User guide: Caching","text":"Cache auth objects resolved at runtime for any configuration bit of an AuthConfig (i.e. any evaluator), of any phase (identity, metadata, authorization and dynamic response), for easy access in subsequent requests, whenever an arbitrary (user-defined) cache key repeats, until the cache entry expires.
This is particularly useful for configuration bits whose evaluation is significantly more expensive than accessing the cache. E.g.:
- Caching of metadata fetched from external sources in general
- Caching of previously validated identity access tokens (e.g. for OAuth2 opaque tokens that involve consuming the token introspection endpoint of an external auth server)
- Caching of complex Rego policies that involve sending requests to external services
Cases where one will NOT want to enable caching, because the evaluation is relatively cheap compared to accessing and managing the cache:
- Validation of OIDC/JWT access tokens
- OPA/Rego policies that do not involve external requests
- JSON pattern-matching authorization
- Dynamic JSON responses
- Anonymous access
Authorino capabilities featured in this guide: - Common feature \u2192 Caching
- Identity verification & authentication \u2192 Anonymous access
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Dynamic response \u2192 JSON injection
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/caching/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/caching/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/caching/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/caching/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/caching/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/caching/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The example below enables caching for the external source of metadata, which in this case, for convenience, is the same upstream API protected by Authorino (i.e. the Talker API), though consumed directly by Authorino, without passing through the proxy. This API generates a uuid
random hash that it injects in the JSON response. This value is different in every request processed by the API.
The example also enables caching of returned OPA virtual documents. cached-authz
is a trivial Rego policy that always grants access, but generates a timestamp, which Authorino will cache.
In both cases, the path of the HTTP request is used as cache key. I.e., whenever the path repeats, Authorino reuses the values stored previously in each cache table (cached-metadata
and cached-authz
), respectively saving a request to the external source of metadata and the evaluation of the OPA policy. In both cases, cache entries expire 60 seconds after being stored in the cache.
The cached values will be visible in the response returned by the Talker API in the x-authz-data
header injected by Authorino. This way, we can tell when an existing value in the cache was used and when a new one was generated and stored.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"anonymous\":\n anonymous: {}\n metadata:\n \"cached-metadata\":\n http:\n url: \"http://talker-api.default.svc.cluster.local:3000/metadata/{context.request.http.path}\"\n cache:\n key:\n selector: context.request.http.path\n ttl: 60\n authorization:\n \"cached-authz\":\n opa:\n rego: |\n now = time.now_ns()\n allow = true\n allValues: true\n cache:\n key:\n selector: context.request.http.path\n ttl: 60\n response:\n success:\n headers:\n \"x-authz-data\":\n json:\n properties:\n \"cached-metadata\":\n selector: auth.metadata.cached-metadata.uuid\n \"cached-authz\":\n selector: auth.authorization.cached-authz.now\nEOF\n
"},{"location":"authorino/docs/user-guides/caching/#consume-the-api","title":"\u277b Consume the API","text":" - To
/hello
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",\n# [\u2026]\n
- To a different path
curl http://talker-api.127.0.0.1.nip.io:8000/goodbye\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343097860450300\\\",\\\"cached-metadata\\\":\\\"37fce386-1ee8-40a7-aed1-bf8a208f283c\\\"}\",\n# [\u2026]\n
- To
/hello
again before the cache entry expires (60 seconds from the first request sent to this path)
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\", <=== same cache-id as before\n# [\u2026]\n
- To
/hello
again after the cache entry expires (60 seconds from the first request sent to this path)
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343135702743800\\\",\\\"cached-metadata\\\":\\\"e708a3a6-5caf-4028-ab5c-573ad9be7188\\\"}\", <=== different cache-id\n# [\u2026]\n
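To make comparing runs easier, you can isolate the injected header with jq (a sketch, assuming the Talker API's echoed JSON exposes the request headers under .headers):
curl -s http://talker-api.127.0.0.1.nip.io:8000/hello | jq -r '.headers[\"X-Authz-Data\"]'\n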
"},{"location":"authorino/docs/user-guides/caching/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/","title":"User guide: Redirecting to a login page","text":"Customize response status code and headers on failed requests to redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized
.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Custom denial status
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
Authorino's default response status codes, messages and headers for unauthenticated (401
) and unauthorized (403
) requests can be customized with static values and values fetched from the Authorization JSON.
Check out as well the user guides about HTTP \"Basic\" Authentication (RFC 7235) and OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample web application called Matrix Quotes to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-the-matrix-quotes-web-application","title":"\u2778 Deploy the Matrix Quotes web application","text":"The Matrix Quotes is a static web application that contains quotes from the film The Matrix.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Matrix Quotes webapp behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/envoy-deploy.yaml\n
The command above creates an Ingress
with host name matrix-quotes.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: matrix-quotes-protection\nspec:\n hosts:\n\n - matrix-quotes.127.0.0.1.nip.io\n authentication:\n \"browser-users\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n cookie:\n name: TOKEN\n \"http-basic-auth\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n authorizationHeader:\n prefix: Basic\n response:\n unauthenticated:\n code: 302\n headers:\n \"Location\":\n selector: \"http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to={request.path}\"\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
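You can verify the custom denial status from the command line even before creating any credentials; an unauthenticated request should be answered with the redirect (response headers below are illustrative):
curl http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html -i\n# HTTP/1.1 302 Found\n# location: http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to=/neo.html\n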
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: user-credential-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: am9objpw # john:p\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application","title":"\u277c Consume the application","text":"On a web browser, navigate to http://matrix-quotes.127.0.0.1.nip.io:8000.
Click on the cards to read quotes from characters of the movie. You should be redirected to the login page.
Log in using John's credentials:
- Username: john
- Password: p
Click again on the cards and check that you are now able to access the inner pages.
You can also consume a protected endpoint of the application using HTTP Basic Authentication:
curl -u john:p http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#optional-modify-the-authconfig-to-authenticate-with-oidc","title":"\u277d (Optional) Modify the AuthConfig
to authenticate with OIDC","text":""},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-a-keycloak-server","title":"Setup a Keycloak server","text":"Deploy a Keycloak server preloaded with a realm named kuadrant
:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Resolve the local Keycloak domain so it can be accessed both from the local host and from inside the cluster under the same name. (This is needed so users can be redirected to Keycloak's login page while the issued tokens remain valid inside the cluster.)
echo '127.0.0.1 keycloak' >> /etc/hosts\n
Forward local requests to the instance of Keycloak running in the cluster:
kubectl port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n
Create a client:
curl -H \"Authorization: Bearer $(curl http://keycloak:8080/realms/master/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=admin-cli' -d 'username=admin' -d 'password=p' | jq -r .access_token)\" \\\n -H 'Content-type: application/json' \\\n -d '{ \"name\": \"matrix-quotes\", \"clientId\": \"matrix-quotes\", \"publicClient\": true, \"redirectUris\": [\"http://matrix-quotes.127.0.0.1.nip.io:8000/auth*\"], \"enabled\": true }' \\\n http://keycloak:8080/admin/realms/kuadrant/clients\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#reconfigure-the-matrix-quotes-app-to-use-keycloaks-login-page","title":"Reconfigure the Matrix Quotes app to use Keycloak's login page","text":"kubectl set env deployment/matrix-quotes KEYCLOAK_REALM=http://keycloak:8080/realms/kuadrant CLIENT_ID=matrix-quotes\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#apply-the-changes-to-the-authconfig","title":"Apply the changes to the AuthConfig
","text":"kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: matrix-quotes-protection\nspec:\n hosts:\n\n - matrix-quotes.127.0.0.1.nip.io\n authentication:\n \"idp-users\":\n jwt:\n issuerUrl: http://keycloak:8080/realms/kuadrant\n credentials:\n cookie:\n name: TOKEN\n response:\n unauthenticated:\n code: 302\n headers:\n \"Location\":\n selector: \"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/auth?client_id=matrix-quotes&redirect_uri=http://matrix-quotes.127.0.0.1.nip.io:8000/auth?redirect_to={request.path}&scope=openid&response_type=code\"\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application-again","title":"Consume the application again","text":"Refresh the browser window or navigate again to http://matrix-quotes.127.0.0.1.nip.io:8000.
Click on the cards to read quotes from characters of the movie. You should be redirected to the login page, this time served by the Keycloak server.
Log in as Jane (a user of the Keycloak realm):
- Username: jane
- Password: p
Click again on the cards and check that you are now able to access the inner pages.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/user-credential-1\nkubectl delete authconfig/matrix-quotes-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/envoy-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/","title":"User guide: Edge Authentication Architecture (EAA)","text":"Edge Authentication Architecture (EAA) is a pattern where more than extracting authentication logics and specifics from the application codebase to a proper authN/authZ layer, this is pushed to the edge of your cloud network, without violating the Zero Trust principle nevertheless.
The very definition of \"edge\" is subject to discussion, but the underlying idea is that clients (e.g. API clients, IoT devices, etc.) authenticate with a layer that, before letting traffic into the network:
- understands the complexity of all the different methods of authentication supported;
- sometimes normalizes tokens;
- optionally enforces some preliminary authorization policies; and
- possibly filters out data bits that are sensitive to privacy concerns (e.g. to comply with local legislation such as GDPR, CCPA, etc.)
At a minimum, EAA simplifies authentication between applications and microservices inside the network, and reduces authorization to domain-specific rules and policies, instead of having to deal with all the complexity of supporting every type of client in every node.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Festival Wristband tokens
- Identity verification & authentication \u2192 Identity extension
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
Festival Wristbands are OpenID Connect ID tokens (signed JWTs) issued by Authorino at the end of the Auth Pipeline for authorized requests. They can be configured to include claims based on static values and on values fetched from the Authorization JSON.
Check out as well the user guides about Token normalization, Authentication with API keys and OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
- jwt, to inspect JWTs (optional)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino and configuring 2 environments of an architecture, edge
and internal
.
The first environment is a facade that handles the first layer of authentication and exchanges any valid authentication token presented for a Festival Wristband token. In the second, we will deploy a sample service called Talker API, for which the authorization service will ensure that only authenticated traffic presenting a valid Festival Wristband gets through.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u2779.
At steps \u2779 and \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-namespaces","title":"\u2777 Create the namespaces","text":"For simplicity, this examples will set up edge and internal nodes in different namespaces of the same Kubernetes cluster. Those will share a same single cluster-wide Authorino instance. In real-life scenarios, it does not have to be like that.
kubectl create namespace authorino\nkubectl create namespace edge\nkubectl create namespace internal\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-authorino","title":"\u2778 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources cluster-wide2, with TLS disabled3.
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-edge","title":"\u2779 Setup the Edge","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy","title":"Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up external authorization with the Authorino instance.4
kubectl -n edge apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-edge-deploy.yaml\n
The command above creates an Ingress
with host name edge.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 9000 to the Envoy service running inside the cluster:
kubectl -n edge port-forward deployment/envoy 9000:9000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig","title":"Create the AuthConfig
","text":"Create a required secret that will be used by Authorino to sign the Festival Wristband tokens:
kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: wristband-signing-key\nstringData:\n key.pem: |\n -----BEGIN EC PRIVATE KEY-----\n MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n -----END EC PRIVATE KEY-----\ntype: Opaque\nEOF\n
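The signing key above is a sample ES256 (P-256 elliptic curve) private key. To use your own key instead, you can generate one with openssl and paste it into the Secret:
openssl ecparam -name prime256v1 -genkey -noout -out /tmp/wristband-signing-key.pem\n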
Create the config:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n edge apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: edge-auth\nspec:\n hosts:\n\n - edge.127.0.0.1.nip.io\n authentication:\n \"api-clients\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n allNamespaces: true\n credentials:\n authorizationHeader:\n prefix: APIKEY\n overrides:\n \"username\":\n selector: auth.identity.metadata.annotations.authorino\\.kuadrant\\.io/username\n \"idp-users\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n defaults:\n \"username\":\n selector: auth.identity.preferred_username\n response:\n success:\n dynamicMetadata:\n \"wristband\":\n wristband:\n issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\n customClaims:\n \"username\":\n selector: auth.identity.username\n tokenDuration: 300\n signingKeyRefs:\n - name: wristband-signing-key\n algorithm: ES256\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-internal-workload","title":"\u277a Setup the internal workload","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-the-talker-api","title":"Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy_1","title":"Setup Envoy","text":"This other bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.
kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-node-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl -n internal port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig_1","title":"Create the AuthConfig
","text":"Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n internal apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"edge-authenticated\":\n jwt:\n issuerUrl: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n annotations:\n authorino.kuadrant.io/username: alice\n authorino.kuadrant.io/email: alice@host\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#using-the-api-key-to-authenticate","title":"Using the API key to authenticate","text":"Authenticate at the edge:
WRISTBAND_TOKEN=$(curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n
Consume the API:
curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Try to consume the API with authentication token that is only accepted in the edge:
curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"edge-authenticated\"\n# x-ext-auth-reason: credential not found\n
(Optional) Inspect the wristband token and verify that it only contains restricted info to authenticate and authorize with internal apps.
jwt decode $WRISTBAND_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# {\n# \"exp\": 1638452051,\n# \"iat\": 1638451751,\n# \"iss\": \"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\",\n# \"sub\": \"02cb51ea0e1c9f3c0960197a2518c8eb4f47e1b9222a968ffc8d4c8e783e4d19\",\n# \"username\": \"alice\"\n# }\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#authenticating-with-the-keycloak-server","title":"Authenticating with the Keycloak server","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs matches always the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl -n edge run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is as well reachable from within the cluster.
(Optional) Inspect the access token issue by Keycloak and verify and how it contains more details about the identity than required to authenticate and authorize with internal apps.
jwt decode $ACCESS_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# { [...]\n# \"email\": \"jane@kuadrant.io\",\n# \"email_verified\": true,\n# \"exp\": 1638452220,\n# \"family_name\": \"Smith\",\n# \"given_name\": \"Jane\",\n# \"iat\": 1638451920,\n# \"iss\": \"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\",\n# \"jti\": \"699f6e49-dea4-4f29-ae2a-929a3a18c94b\",\n# \"name\": \"Jane Smith\",\n# \"preferred_username\": \"jane\",\n# \"realm_access\": {\n# \"roles\": [\n# \"offline_access\",\n# \"member\",\n# \"admin\",\n# \"uma_authorization\"\n# ]\n# },\n# [...]\n
As Jane, obtain a limited wristband token at the edge:
WRISTBAND_TOKEN=$(curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n
Consume the API:
curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete namespace edge\nkubectl delete namespace internal\nkubectl delete namespace authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino and Authorino Operator manifests, run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
cluster-wide
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/","title":"User guide: Mixing Envoy built-in filter for auth and Authorino","text":"Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in a AuthConfig.
In this user guide, we will set up Envoy and Authorino to protect a service called the Talker API, with JWT authentication handled in Envoy and a more complex authorization policy enforced in Authorino.
The policy defines a geo-fence by which only requests originating in Great Britain (country code: GB) will be accepted, unless the user is bound to a role called 'admin' in the auth server, in which case no geofence is enforced.
All requests to the Talker API will be authenticated in Envoy. However, requests to /global
will not trigger the external authorization.
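The wiring between the two filters and Authorino comes down to three short excerpts, all of which appear in the full configurations later in this guide: the jwt_authn filter stores the verified JWT payload in the request's dynamic metadata, the ext_authz filter forwards that metadata namespace to Authorino, and the AuthConfig reads the payload back as a plain identity object:

# Envoy jwt_authn filter: store the verified JWT payload in dynamic metadata
payload_in_metadata: verified_jwt

# Envoy ext_authz filter: forward the jwt_authn metadata namespace to Authorino
metadata_context_namespaces:
- envoy.filters.http.jwt_authn

# AuthConfig: read the injected payload back as a plain identity object
authentication:
  "jwt":
    plain:
      selector: context.metadata_context.filter_metadata.envoy\.filters\.http\.jwt_authn|verified_jwt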
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Plain
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Pattern-matching authorization
- Dynamic response \u2192 Custom denial status
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches that of the AuthPolicy, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: authorino\n name: envoy\ndata:\n envoy.yaml: |\n static_resources:\n clusters:\n\n - name: talker-api\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: talker-api\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: talker-api\n port_value: 3000\n - name: keycloak\n connect_timeout: 0.25s\n type: logical_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: keycloak\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: keycloak.keycloak.svc.cluster.local\n port_value: 8080\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization\n port_value: 50051\n listeners:\n - address:\n socket_address:\n address: 0.0.0.0\n port_value: 8000\n filter_chains:\n - filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n stat_prefix: local\n route_config:\n name: local_route\n virtual_hosts:\n - name: local_service\n domains: ['*']\n routes:\n - match: { path_separated_prefix: /global }\n route: { cluster: talker-api }\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n disabled: true\n - match: { prefix: / }\n route: { cluster: talker-api }\n http_filters:\n - name: envoy.filters.http.jwt_authn\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\n providers:\n keycloak:\n issuer: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n remote_jwks:\n http_uri:\n uri: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/certs\n cluster: keycloak\n timeout: 5s\n cache_duration:\n seconds: 300\n payload_in_metadata: verified_jwt\n rules:\n - match: { prefix: / }\n requires: { provider_name: keycloak }\n - name: envoy.filters.http.ext_authz\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false\n metadata_context_namespaces:\n - envoy.filters.http.jwt_authn\n grpc_service:\n envoy_grpc:\n cluster_name: authorino\n timeout: 1s\n - name: envoy.filters.http.router\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n use_remote_address: true\n admin:\n access_log_path: \"/tmp/admin_access.log\"\n address:\n socket_address:\n address: 0.0.0.0\n port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: authorino\n svc: envoy\n name: envoy\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: authorino\n svc: envoy\n template:\n metadata:\n labels:\n app: authorino\n svc: envoy\n spec:\n containers:\n - args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n command:\n - /usr/local/bin/envoy\n image: envoyproxy/envoy:v1.22-latest\n name: envoy\n ports:\n - containerPort: 8000\n name: web\n - containerPort: 8001\n name: admin\n volumeMounts:\n - mountPath: 
/usr/local/etc/envoy\n name: config\n readOnly: true\n volumes:\n - configMap:\n items:\n - key: envoy.yaml\n path: envoy.yaml\n name: envoy\n name: config\n---\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: authorino\n name: envoy\nspec:\n ports:\n - name: web\n port: 8000\n protocol: TCP\n selector:\n app: authorino\n svc: envoy\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n name: ingress-wildcard-host\nspec:\n rules:\n - host: talker-api.127.0.0.1.nip.io\n http:\n paths:\n - backend:\n service:\n name: envoy\n port:\n number: 8000\n path: /\n pathType: Prefix\nEOF\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-ip-location-service","title":"\u277a Deploy the IP Location service","text":"The IP Location service is a simple service that resolves an IPv4 address into geo location info.
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/ip-location/ip-location-deploy.yaml\n
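To see what the service returns, you can query it directly from inside the cluster. A sketch (the pod name ip-query is arbitrary; the response fields shown are the ones consumed by the AuthConfig in the next step, and the actual payload may include more attributes):

kubectl run ip-query --attach --rm --restart=Never -q --image=curlimages/curl -- \
  -s http://ip-location.default.svc.cluster.local:3000/79.123.45.67
# {"country_iso_code":"GB","country_name":"United Kingdom",...}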
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#create-an-authconfig","title":"\u277b Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"jwt\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n metadata:\n \"geoinfo\":\n http:\n url: 'http://ip-location.default.svc.cluster.local:3000/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}'\n headers:\n \"Accept\":\n value: application/json\n cache:\n key:\n selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\":\\\",\\\"}\"\n authorization:\n \"geofence\":\n when:\n - selector: auth.identity.realm_access.roles\n operator: excl\n value: admin\n patternMatching:\n patterns:\n - selector: auth.metadata.geoinfo.country_iso_code\n operator: eq\n value: \"GB\"\n response:\n unauthorized:\n message:\n selector: \"The requested resource is not available in {auth.metadata.geoinfo.country_name}\"\nEOF\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-a-token-and-consume-the-api","title":"\u277c Obtain a token and consume the API","text":""},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because the iss
claim Keycloak adds to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
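You can double-check the issuer encoded in a token, e.g. with a JWT decoding tool such as the jwt CLI:

jwt decode $ACCESS_TOKEN
# [...]
# "iss": "http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant",
# [...]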
Obtain an access token from within the cluster for the user John, a non-admin (member) user:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As John, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As John, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: The requested resource is not available in Italy\n
As John, consume a path of the API that will cause Envoy to skip external authorization:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"Obtain an access token with the Keycloak server for Jane, an admin user:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As Jane, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As Jane, consume a path of the API that will cause Envoy to skip external authorization:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete ingress/ingress-wildcard-host\nkubectl delete service/envoy\nkubectl delete deployment/envoy\nkubectl delete configmap/envoy\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/external-metadata/","title":"User guide: Fetching auth metadata from external sources","text":"Get online data from remote HTTP services to enhance authorization rules.
Authorino capabilities featured in this guide: - External auth metadata \u2192 HTTP GET/GET-by-POST
- Identity verification & authentication \u2192 API key
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
You can configure Authorino to fetch additional metadata from external sources at request time, by sending either a GET or a POST request to an HTTP service. The service is expected to return JSON content, which is appended to the Authorization JSON and thus becomes available for use in other configs of the Auth Pipeline, such as authorization policies or custom responses.
URL, parameters and headers of the request to the external source of metadata can be configured, including with dynamic values. Authentication between Authorino and the service can be set as part of these configuration options, or based on a shared authentication token stored in a Kubernetes Secret
.
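As a sketch of what a GET-by-POST metadata source could look like (the config name "userinfo" and the endpoint URL are illustrative assumptions, not part of this guide):

metadata:
  "userinfo":
    http:
      url: http://metadata-service.default.svc.cluster.local:3000/userinfo  # hypothetical endpoint
      method: POST
      contentType: application/x-www-form-urlencoded
      headers:
        "Accept":
          value: application/json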
Check out as well the user guides about Authentication with API keys and Open Policy Agent (OPA) Rego policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/external-metadata/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches that of the AuthPolicy, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/external-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/external-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/external-metadata/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/external-metadata/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/external-metadata/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, we will implement a geofence policy for the API, using OPA and metadata fetching from an external service that returns geolocation JSON data for a given IP address. The policy establishes that only GET
requests are allowed and the path of the request should be in the form /{country-code}/*
, where {country-code}
is the 2-character code of the country where the client is identified as being physically present.
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n metadata:\n \"geo\":\n http:\n url: 'http://ip-api.com/json/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}?fields=countryCode'\n headers:\n \"Accept\":\n value: application/json\n authorization:\n \"geofence\":\n opa:\n rego: |\n import input.context.request.http\n\n allow {\n http.method = \"GET\"\n split(http.path, \"/\") = [_, requested_country, _]\n lower(requested_country) == lower(object.get(input.auth.metadata.geo, \"countryCode\", \"\"))\n }\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON, including the description of the @extract
string modifier.
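As an illustration of the @extract string modifier used in the metadata URL above:

# Given the request header:
#   X-Forwarded-For: 109.112.34.56, 10.0.0.1
# the selector
#   context.request.http.headers.x-forwarded-for.@extract:{"sep":","}
# splits the value on "," and returns the first element ("pos" defaults to 0):
#   109.112.34.56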
"},{"location":"authorino/docs/user-guides/external-metadata/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/external-metadata/#consume-the-api","title":"\u277c Consume the API","text":"From an IP address assigned to the United Kingdom of Great Britain and Northern Ireland (country code GB):
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 403 Forbidden\n
From an IP address assigned to Italy (country code IT):
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 109.112.34.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 403 Forbidden\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 109.112.34.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/external-metadata/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/hello-world/","title":"User guide: Hello World","text":""},{"location":"authorino/docs/user-guides/hello-world/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant, you can skip step \u2778. You may already have Authorino installed and running as well. In this case, also skip step \u277a. If you also have your workload cluster configured, with the sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, go straight to step \u277c.
At step \u277c, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches that of the AuthPolicy, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/hello-world/#create-the-namespace","title":"\u2776 Create the namespace","text":"kubectl create namespace hello-world\n# namespace/hello-world created\n
"},{"location":"authorino/docs/user-guides/hello-world/#deploy-the-talker-api","title":"\u2777 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n# deployment.apps/talker-api created\n# service/talker-api created\n
"},{"location":"authorino/docs/user-guides/hello-world/#setup-envoy","title":"\u2778 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.1
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/envoy-deploy.yaml\n# configmap/envoy created\n# deployment.apps/envoy created\n# service/envoy created\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl -n hello-world port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-unprotected","title":"\u2779 Consume the API (unprotected)","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/hello-world/#protect-the-api","title":"\u277a Protect the API","text":""},{"location":"authorino/docs/user-guides/hello-world/#install-the-authorino-operator","title":"Install the Authorino Operator","text":"curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/hello-world/#deploy-authorino","title":"Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service2 that watches for AuthConfig
resources in the hello-world
namespace3, with TLS disabled4.
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authorino.yaml\n# authorino.operator.authorino.kuadrant.io/authorino created\n
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-behind-envoy-and-authorino","title":"\u277b Consume the API behind Envoy and Authorino","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 404 Not Found\n# x-ext-auth-reason: Service not found\n
Authorino does not know about the talker-api.127.0.0.1.nip.io
host, hence the 404 Not Found
. Let's teach Authorino about this host by applying an AuthConfig
.
"},{"location":"authorino/docs/user-guides/hello-world/#apply-the-authconfig","title":"\u277c Apply the AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authconfig.yaml\n# authconfig.authorino.kuadrant.io/talker-api-protection created\n
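For reference, a minimal sketch of what the applied AuthConfig could look like, consistent with the APIKEY challenge shown in the next step (the label selector here is an illustrative assumption; the actual manifest lives in the authorino-examples repository):

apiVersion: authorino.kuadrant.io/v1beta2
kind: AuthConfig
metadata:
  name: talker-api-protection
spec:
  hosts:
  - talker-api.127.0.0.1.nip.io
  authentication:
    "api-clients":
      apiKey:
        selector:
          matchLabels:
            group: api-clients  # assumed label; check the manifest for the actual selector
      credentials:
        authorizationHeader:
          prefix: APIKEY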
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-without-credentials","title":"\u277d Consume the API without credentials","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-clients\"\n# x-ext-auth-reason: credential not found\n
"},{"location":"authorino/docs/user-guides/hello-world/#grant-access-to-the-api-with-a-tailor-made-security-scheme","title":"Grant access to the API with a tailor-made security scheme","text":"Check out other user guides for several use-cases of authentication and authorization, and the instructions to implement them using Authorino.
A few examples of available user guides:
- Authentication with API keys
- Authentication with JWTs and OpenID Connect Discovery
- Authentication with Kubernetes tokens (TokenReview API)
- Authorization with Open Policy Agent (OPA) Rego policies
- Authorization with simple JSON pattern-matching rules (e.g. JWT claims)
- Authorization with Kubernetes RBAC (SubjectAccessReview API)
- Fetching auth metadata from external sources
- Token normalization
"},{"location":"authorino/docs/user-guides/hello-world/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the namespaces created in steps 1 and 5:
kubectl delete namespace hello-world\nkubectl delete namespace authorino-operator\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/host-override/","title":"Host override via context extension","text":"By default, Authorino uses the host information of the HTTP request (Attributes.Http.Host
) to lookup for an indexed AuthConfig to be enforced1. The host info be overridden by supplying a host
entry as a (per-route) context extension (Attributes.ContextExtensions
), which takes precedence whenever present.
Overriding the host attribute of the HTTP request can be useful to support use cases such as path prefix-based lookup and wildcard subdomain lookup.
\u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. In this guide:
- Example of host override for path prefix-based lookup
- Example of host override for wildcard subdomain lookup
"},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-path-prefix-based-lookup","title":"Example of host override for path prefix-based lookup","text":"In this use case, 2 different APIs (i.e. Dogs API and Cats API) are served under the same base domain, and differentiated by the path prefix:
pets.com/dogs
\u2192 Dogs API pets.com/cats
\u2192 Cats API
Edit the Envoy config to extend the external authorization settings at the level of the routes, with the host
value that will be favored by Authorino over the actual host attribute of the HTTP request:
virtual_hosts:\n\n- name: pets-api\n domains: ['pets.com']\n routes:\n - match:\n prefix: /dogs\n route:\n cluster: dogs-api\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: dogs.pets.com\n - match:\n prefix: /cats\n route:\n cluster: cats-api\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: cats.pets.com\n
Create the AuthConfig for the Pets API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: dogs-api-protection\nspec:\n hosts:\n\n - dogs.pets.com\n\n authentication: [...]\n
Create the AuthConfig for the Cats API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: cats-api-protection\nspec:\n hosts:\n\n - cats.pets.com\n\n authentication: [...]\n
Notice that the host subdomains dogs.pets.com
and cats.pets.com
are not really requested by the API consumers. Rather, users send requests to pets.com/dogs
and pets.com/cats
. When routing those requests, Envoy makes sure to inject the corresponding context extensions that will induce the right lookup in Authorino.
"},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-wildcard-subdomain-lookup","title":"Example of host override for wildcard subdomain lookup","text":"In this use case, a single Pets API serves requests for any subdomain that matches *.pets.com
, e.g.:
dogs.pets.com
\u2192 Pets API cats.pets.com
\u2192 Pets API
Edit the Envoy config to extend the external authorization settings at the level of the virtual host, with the host
value that will be favored by Authorino over the actual host attribute of the HTTP request:
virtual_hosts:\n\n- name: pets-api\n domains: ['*.pets.com']\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: pets.com\n routes:\n - match:\n prefix: /\n route:\n cluster: pets-api\n
The host
context extension value used above must match one of the hosts listed in the targeted AuthConfig.
Create the AuthConfig for the Pets API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: pets-api-protection\nspec:\n hosts:\n\n - pets.com\n\n authentication: [...]\n
Notice that requests to dogs.pets.com
and to cats.pets.com
are all routed by Envoy to the same API, with the same external authorization configuration. In all cases, Authorino will look up the indexed AuthConfig associated with pets.com
. The same is valid for a request sent, e.g., to birds.pets.com
.
-
For further details about Authorino lookup of AuthConfig, check out Host lookup.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/http-basic-authentication/","title":"User guide: HTTP \"Basic\" Authentication (RFC 7235)","text":"Turn Authorino API key Secret
s settings into HTTP basic auth.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 API key
- Authorization \u2192 Pattern-matching authorization
HTTP \"Basic\" Authentication (RFC 7235) is not recommended if you can afford other more secure methods such as OpenID Connect. To support legacy nonetheless it is sometimes necessary to implement it.
In Authorino, HTTP \"Basic\" Authentication can be modeled leveraging the API key authentication feature (stored as Kubernetes Secret
s with an api_key
entry and labeled to match selectors specified in spec.authentication.apiKey.selector
of the AuthConfig
).
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches that of the AuthPolicy, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The config uses API Key secrets to store base64-encoded username:password
HTTP \"Basic\" authentication credentials. The config also specifies an Access Control List (ACL) by which only user john
is authorized to consume the /bye
endpoint of the API.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"http-basic-auth\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n authorizationHeader:\n prefix: Basic\n authorization:\n \"acl\":\n when:\n - selector: context.request.http.path\n operator: eq\n value: /bye\n patternMatching:\n patterns:\n - selector: context.request.http.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\"}\n operator: eq\n value: john\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON, including the description of the string modifiers @extract
and @base64
used above. Check out as well the common feature Conditions about skipping parts of an AuthConfig
in the auth pipeline based on context.
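Step by step, for the credentials of user John created in the next step, the chained string modifiers in the ACL pattern resolve as:

# Authorization: Basic am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==
# @extract:{"pos":1}   -> am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==
# @base64:decode       -> john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx
# @extract:{"sep":":"} -> john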
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-user-credentials","title":"\u277b Create user credentials","text":"To create credentials for HTTP \"Basic\" Authentication, store each username:password
, base64-encoded, in the api_key
value of the Kubernetes Secret
resources. E.g.:
printf \"john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" | base64\n# am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==\n
Create credentials for user John:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: basic-auth-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA== # john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
Create credentials for user Jane:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: basic-auth-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: amFuZTpkTnNScnNhcHkwbk5Dd210NTM3ZkhGcHl4MGNCc0xFcA== # jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#consume-the-api","title":"\u277c Consume the API","text":"As John (authorized in the ACL):
curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/bye\n# HTTP/1.1 200 OK\n
As Jane (NOT authorized in the ACL):
curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/bye -i\n# HTTP/1.1 403 Forbidden\n
With an invalid user/password:
curl -u unknown:invalid http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"http-basic-auth\"\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#revoke-access-to-the-api","title":"\u277d Revoke access to the API","text":"kubectl delete secret/basic-auth-1\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/basic-auth-1\nkubectl delete secret/basic-auth-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/injecting-data/","title":"User guide: Injecting data in the request","text":"Inject HTTP headers with serialized JSON content.
Authorino capabilities featured in this guide: - Dynamic response \u2192 JSON injection
- Identity verification & authentication \u2192 API key
Inject serialized custom JSON objects as HTTP request headers. Values can be static or fetched from the Authorization JSON.
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/injecting-data/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches that of the AuthPolicy, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/injecting-data/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/injecting-data/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/injecting-data/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/injecting-data/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/injecting-data/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The following defines a JSON object to be injected as an added HTTP header into the request, named after the response config x-ext-auth-data
. The object includes 3 properties:
- a static value
authorized: true
; - a dynamic value
request-time
, from Envoy-supplied contextual data present in the Authorization JSON; and - a greeting message
greeting-message
that interpolates into a static string a dynamic value read from an annotation of the Kubernetes Secret resource that represents the API key used to authenticate.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-ext-auth-data\":\n json:\n properties:\n \"authorized\":\n value: true\n \"request-time\":\n selector: context.request.time.seconds\n \"greeting-message\":\n selector: Hello, {auth.identity.metadata.annotations.auth-data\\/name}!\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
"},{"location":"authorino/docs/user-guides/injecting-data/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/name: Rita\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/injecting-data/#consume-the-api","title":"\u277c Consume the API","text":"curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# {\n# \"method\": \"GET\",\n# \"path\": \"/hello\",\n# \"query_string\": null,\n# \"body\": \"\",\n# \"headers\": {\n# \u2026\n# \"X-Ext-Auth-Data\": \"{\\\"authorized\\\":true,\\\"greeting-message\\\":\\\"Hello, Rita!\\\",\\\"request-time\\\":1637954644}\",\n# },\n# \u2026\n# }\n
"},{"location":"authorino/docs/user-guides/injecting-data/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/","title":"User guide: Simple pattern-matching authorization policies","text":"Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.
Authorino capabilities featured in this guide: - Authorization \u2192 Pattern-matching authorization
- Identity verification & authentication \u2192 JWT verification
Authorino provides a built-in authorization module to check simple pattern-matching rules against the Authorization JSON. This is an alternative to OPA when all you want is to check a few simple rules, without complex logic, such as matching the value of a JWT claim.
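For example, a rule that authorizes only users carrying the admin realm role in their JWT could be sketched as follows (the rule name and role value are illustrative):

authorization:
  "only-admins":
    patternMatching:
      patterns:
      - selector: auth.identity.realm_access.roles
        operator: incl
        value: admin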
Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The email-verified-only
authorization policy ensures that users consuming the API from a given network (IP range 192.168.1.0/24) must have their emails verified.
The email_verified
claim is a property of the identity added to the JWT by the OpenID Connect issuer.
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"email-verified-only\":\n when:\n - selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\": \\\",\\\"}\"\n operator: matches\n value: 192\\\\.168\\\\.1\\\\.\\\\d+\n patternMatching:\n patterns:\n - selector: auth.identity.email_verified\n operator: eq\n value: \"true\"\nEOF\n
Check out the docs for information about the semantics and operators supported by the JSON pattern-matching authorization feature, as well as the common feature JSON paths for reading from the Authorization JSON, including the description of the string modifier @extract
used above. Check out as well the common feature Conditions about skipping parts of an AuthConfig
in the auth pipeline based on context.
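To illustrate the @extract modifier used in the when condition above: it splits the input string by the given separator (sep) and returns the fragment at position pos (0 by default). For a hypothetical X-Forwarded-For value:
# input:    \"192.168.1.10, 10.0.0.1\"\n# selector: context.request.http.headers.x-forwarded-for.@extract:{\"sep\": \",\"}\n# result:   \"192.168.1.10\" (the first fragment, i.e. the original client IP)\n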
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-jane-email-verified","title":"Obtain an access token and consume the API as Jane (email verified)","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that serves the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As Jane, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As Jane, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-peter-email-not-verified","title":"Obtain an access token and consume the API as Peter (email NOT verified)","text":"Obtain an access token with the Keycloak server for Peter:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Peter, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As Peter, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete namespace keycloak\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/","title":"User guide: Authorization with Keycloak Authorization Services","text":"Keycloak provides a powerful set of tools (REST endpoints and administrative UIs), also known as Keycloak Authorization Services, to manage and enforce authorization, workflows for multiple access control mechanisms, including discretionary user access control and user-managed permissions.
This user guide is an example of how to use Authorino as an adapter to Keycloak Authorization Services while still relying on the reverse-proxy integration pattern, without importing an authorization library or rebuilding the application's code.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Keycloak server
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Forward local requests to Keycloak running inside the cluster (if using Kind):
kubectl -n keycloak port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, Authorino will accept access tokens (JWTs) issued by the Keycloak server. These JWTs can be either normal Keycloak ID tokens or Requesting Party Tokens (RPT).
RPTs include claims about the user's permissions over the protected resources and scopes associated with a Keycloak authorization client, i.e. what the user can access.
When the supplied access token is an RPT, Authorino simply validates whether the permissions granted to the user and present in the token include the requested resource ID (translated from the path) and scope (inferred from the HTTP method). If the token does not contain a permissions
claim (i.e. it is not an RPT), Authorino will negotiate a User-Managed Access (UMA) ticket on behalf of the user and try to obtain an RPT on that UMA ticket.
In cases of asynchronous user-managed permission control, the first request to the API using a normal Keycloak ID token is denied by Authorino. The user that owns the resource acknowledges the access request in the Keycloak UI. If access is granted, the new permissions will be reflected in subsequent RPTs obtained by Authorino on behalf of the requesting party.
Whenever an RPT with the proper permissions is obtained by Authorino, the RPT is supplied back to the API consumer, so it can be used in subsequent requests, skipping new negotiations of UMA tickets.
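For reference, the permissions evaluated by the policy live inside the authorization claim of the decoded RPT. An illustrative (not literal) payload fragment, following Keycloak's RPT format; the rsid and scopes fields are the ones matched by the allow rule below:
{\n  \"authorization\": {\n    \"permissions\": [\n      { \"rsid\": \"<resource id>\", \"rsname\": \"greeting-1\", \"scopes\": [\"get\"] }\n    ]\n  }\n}\n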
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"uma\":\n opa:\n rego: |\n pat := http.send({\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\": \"post\",\"headers\":{\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":\"grant_type=client_credentials\"}).body.access_token\n resource_id := http.send({\"url\":concat(\"\",[\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/resource_set?uri=\",input.context.request.http.path]),\"method\":\"get\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat])}}).body[0]\n scope := lower(input.context.request.http.method)\n access_token := trim_prefix(input.context.request.http.headers.authorization, \"Bearer \")\n\n default rpt = \"\"\n rpt = access_token { object.get(input.auth.identity, \"authorization\", {}).permissions }\n else = rpt_str {\n ticket := http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/permission\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat]),\"Content-Type\":\"application/json\"},\"raw_body\":concat(\"\",[\"[{\\\"resource_id\\\":\\\"\",resource_id,\"\\\",\\\"resource_scopes\\\":[\\\"\",scope,\"\\\"]}]\"])}).body.ticket\n rpt_str := object.get(http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",access_token]),\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":concat(\"\",[\"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&ticket=\",ticket,\"&submit_request=true\"])}).body, \"access_token\", \"\")\n }\n\n allow {\n permissions := object.get(io.jwt.decode(rpt)[1], \"authorization\", { \"permissions\": [] }).permissions\n permissions[i]\n permissions[i].rsid = resource_id\n permissions[i].scopes[_] = scope\n }\n allValues: true\n response:\n success:\n headers:\n \"x-keycloak\":\n when:\n\n - selector: auth.identity.authorization.permissions\n operator: eq\n value: \"\"\n json:\n properties:\n \"rpt\":\n selector: auth.authorization.uma.rpt\nEOF\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that serves the OpenID Connect configuration.
Obtain an access token from within the cluster for user Jane:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#consume-the-api","title":"\u277c Consume the API","text":"As Jane, try to send a GET
request to the protected resource /greetings/1
, owned by user John.
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n
As John, log in to http://localhost:8080/realms/kuadrant/account in the web browser (username: john
/ password: p
), and grant access to the resource greeting-1
for Jane. A pending permission request from Jane will appear in the list of John's Resources.
As Jane, try to consume the protected resource /greetings/1
again:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n#\n# {\u2026\n# \"headers\": {\u2026\n# \"X-Keycloak\": \"{\\\"rpt\\\":\\\"<RPT>\", \u2026\n
Copy the RPT from the response and repeat the request now using the RPT to authenticate:
curl -H \"Authorization: Bearer <RPT>\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/","title":"User guide: Kubernetes RBAC for service authorization (SubjectAccessReview API)","text":"Manage permissions in the Kubernetes RBAC and let Authorino to check them in request-time with the authorization system of the cluster.
Authorino capabilities featured in this guide: - Authorization \u2192 Kubernetes SubjectAccessReview
- Identity verification & authentication \u2192 Kubernetes TokenReview
Authorino can delegate authorization decision to the Kubernetes authorization system, allowing permissions to be stored and managed using the Kubernetes Role-Based Access Control (RBAC) for example. The feature is based on the SubjectAccessReview
API and can be used for resourceAttributes
(parameters defined in the AuthConfig
) or nonResourceAttributes
(inferring HTTP path and verb from the original request).
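For a non-resource request like the ones in this guide, the review issued by Authorino is equivalent to something like the following (illustrative values):
apiVersion: authorization.k8s.io/v1\nkind: SubjectAccessReview\nspec:\n  user: system:serviceaccount:default:api-consumer-1\n  nonResourceAttributes:\n    path: /hello\n    verb: post\n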
Check out as well the user guide about Authentication with Kubernetes tokens (TokenReview API).
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create
TokenRequest
s (to consume the protected service from outside the cluster) - jq
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The AuthConfig
below sets all Kubernetes service accounts as trusted users of the API, and relies on the Kubernetes RBAC to enforce authorization using the Kubernetes SubjectAccessReview API for non-resource endpoints:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n - envoy.default.svc.cluster.local\n authentication:\n \"service-accounts\":\n kubernetesTokenReview:\n audiences: [\"https://kubernetes.default.svc.cluster.local\"]\n authorization:\n \"k8s-rbac\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.user.username\nEOF\n
Check out the spec of the Authorino Kubernetes SubjectAccessReview authorization feature for permission checks on resource attributes, where the SubjectAccessReviews issued by Authorino are modeled in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb).
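A sketch of what such a resource-attributes check could look like, with hypothetical values; each attribute accepts either a static value or a selector into the Authorization JSON:
authorization:\n  \"k8s-rbac-resources\":\n    kubernetesSubjectAccessReview:\n      user:\n        selector: auth.identity.user.username\n      resourceAttributes:\n        namespace: {value: default}\n        group: {value: apps}\n        resource: {value: deployments}\n        verb: {selector: context.request.http.method.@case:lower}\n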
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-roles-associated-with-endpoints-of-the-api","title":"\u277b Create roles associated with endpoints of the API","text":"Because the k8s-rbac
policy defined in the AuthConfig
in the previous step is for non-resource access review requests, the corresponding roles and role bindings have to be defined at cluster scope.
Create a talker-api-greeter
role; users and service accounts bound to this role can consume the non-resource endpoints POST /hello
and POST /hi
of the API:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: talker-api-greeter\nrules:\n\n- nonResourceURLs: [\"/hello\"]\n verbs: [\"post\"]\n- nonResourceURLs: [\"/hi\"]\n verbs: [\"post\"]\nEOF\n
Create a talker-api-speaker
role; users and service accounts bound to this role can consume the non-resource endpoints POST /say/*
of the API:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: talker-api-speaker\nrules:\n\n- nonResourceURLs: [\"/say/*\"]\n verbs: [\"post\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-the-serviceaccounts-and-permissions-to-consume-the-api","title":"\u277c Create the ServiceAccount
s and permissions to consume the API","text":"Create service accounts api-consumer-1
and api-consumer-2
:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-1\nEOF\n
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-2\nEOF\n
Bind both service accounts to the talker-api-greeter
role:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: talker-api-greeter-rolebinding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: talker-api-greeter\nsubjects:\n\n- kind: ServiceAccount\n name: api-consumer-1\n namespace: default\n- kind: ServiceAccount\n name: api-consumer-2\n namespace: default\nEOF\n
Bind service account api-consumer-1
to the talker-api-speaker
role:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: talker-api-speaker-rolebinding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: talker-api-speaker\nsubjects:\n\n- kind: ServiceAccount\n name: api-consumer-1\n namespace: default\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#consume-the-api","title":"\u277d Consume the API","text":"Run a pod that consumes one of the greeting endpoints of the API from inside the cluster, as service account api-consumer-1
, bound to the talker-api-greeter
and talker-api-speaker
cluster roles in the Kubernetes RBAC:
kubectl run greeter --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/hi\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-1\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 200\n
Run a pod that sends a POST
request to /say/blah
from within the cluster, as service account api-consumer-1
:
kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-1\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 200\n
Run a pod that sends a POST
request to /say/blah
from within the cluster, as service account api-consumer-2
, bound only to the talker-api-greeter
cluster role in the Kubernetes RBAC:
kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-2\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 403\n
Extra: consume the API as service account api-consumer-2
from outside the cluster Obtain a short-lived access token for service account api-consumer-2
, bound to the talker-api-greeter
cluster role in the Kubernetes RBAC, using the Kubernetes TokenRequest API:
export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-2/token -f - | jq -r .status.token)\n
Consume the API as api-consumer-2
from outside the cluster:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/say/something -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete serviceaccount/api-consumer-1\nkubectl delete serviceaccount/api-consumer-2\nkubectl delete clusterrolebinding/talker-api-greeter-rolebinding\nkubectl delete clusterrolebinding/talker-api-speaker-rolebinding\nkubectl delete clusterrole/talker-api-greeter\nkubectl delete clusterrole/talker-api-speaker\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/","title":"User guide: Authentication with Kubernetes tokens (TokenReview API)","text":"Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Kubernetes TokenReview
Authorino can verify Kubernetes-valid access tokens (using the Kubernetes TokenReview API).
These tokens can be either ServiceAccount
tokens or any valid user access tokens issued to users of the Kubernetes server API.
The audiences
claim of the token must include the requested host and port of the protected API (default), or all audiences specified in the kubernetesTokenReview.audiences field
of the AuthConfig
.
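Under the hood, the validation is equivalent to a TokenReview request such as the following (illustrative):
apiVersion: authentication.k8s.io/v1\nkind: TokenReview\nspec:\n  token: <the access token supplied in the request>\n  audiences:\n  - talker-api\n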
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create
TokenRequest
s (to consume the protected service from outside the cluster) - jq
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n - envoy.default.svc.cluster.local\n authentication:\n \"authorized-service-accounts\":\n kubernetesTokenReview:\n audiences:\n - talker-api\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-protected-by-authorino","title":"\u277b Consume the API protected by Authorino","text":""},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-a-serviceaccount","title":"Create a ServiceAccount
","text":"Create a Kubernetes ServiceAccount
to identify the consumer application that will send requests to the protected API:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-1\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-outside-the-cluster","title":"Consume the API from outside the cluster","text":"Obtain a short-lived access token for the api-consumer-1
service account:
export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"audiences\": [\"talker-api\"], \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-1/token -f - | jq -r .status.token)\n
Consume the API with a valid Kubernetes token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Consume the API again after the Kubernetes token expires (10 minutes):
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"authorized-service-accounts\"\n# x-ext-auth-reason: Not authenticated\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-inside-the-cluster","title":"Consume the API from inside the cluster","text":"Deploy an application that consumes an endpoint of the Talker API, in a loop, every 10 seconds. The application uses a short-lived service account token mounted inside the container using Kubernetes Service Account Token Volume Projection to authenticate.
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Pod\nmetadata:\n name: api-consumer\nspec:\n containers:\n\n - name: api-consumer\n image: quay.io/kuadrant/authorino-examples:api-consumer\n command: [\"./run\"]\n args:\n - --endpoint=http://envoy.default.svc.cluster.local:8000/hello\n - --token-path=/var/run/secrets/tokens/api-token\n - --interval=10\n volumeMounts:\n - mountPath: /var/run/secrets/tokens\n name: talker-api-access-token\n serviceAccountName: api-consumer-1\n volumes:\n - name: talker-api-access-token\n projected:\n sources:\n - serviceAccountToken:\n path: api-token\n expirationSeconds: 7200\n audience: talker-api\nEOF\n
Check the logs of api-consumer
:
kubectl logs -f api-consumer\n# Sending...\n# 200\n# 200\n# 200\n# 200\n# ...\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete pod/api-consumer\nkubectl delete serviceaccount/api-consumer-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/mtls-authentication/","title":"User guide: Authentication with X.509 certificates and Mutual Transport Layer Security (mTLS)","text":"Verify client X.509 certificates against trusted root CAs stored in Kubernetes Secret
s to authenticate access to APIs protected with Authorino.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 X.509 client certificate authentication
- Authorization \u2192 Pattern-matching authorization
Authorino can verify X.509 certificates presented by clients for authentication of requests to the protected APIs, at the application level.
Trusted root Certificate Authorities (CAs) are stored as Kubernetes kubernetes.io/tls
Secrets labeled according to selectors specified in the AuthConfig, watched and cached by Authorino.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/mtls-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/mtls-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following commands will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS enabled3.
Create the TLS certificates for the Authorino service:
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/default/g\" | kubectl apply -f -\n
Request the Authorino instance:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#create-a-ca","title":"\u2779 Create a CA","text":"Create a CA (Certificate Authority) certificate to issue the client certificates that will be used to authenticate clients that send requests to the Talker API:
openssl req -x509 -sha512 -nodes \\\n -days 365 \\\n -newkey rsa:4096 \\\n -subj \"/CN=talker-api-ca\" \\\n -addext basicConstraints=CA:TRUE \\\n -addext keyUsage=digitalSignature,keyCertSign \\\n -keyout /tmp/ca.key \\\n -out /tmp/ca.crt\n
Store the CA cert in a Kubernetes Secret
, labeled to be discovered by Authorino and to be mounted in the file system of the Envoy container:
kubectl create secret tls talker-api-ca --cert=/tmp/ca.crt --key=/tmp/ca.key\nkubectl label secret talker-api-ca authorino.kuadrant.io/managed-by=authorino app=talker-api\n
Prepare an extension file for the client certificate signing requests:
cat > /tmp/x509v3.ext << EOF\nauthorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nkeyUsage=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment\nextendedKeyUsage=clientAuth\nEOF\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#setup-envoy","title":"\u277a Setup Envoy","text":"The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: envoy\n name: envoy\ndata:\n envoy.yaml: |\n static_resources:\n listeners:\n\n - address:\n socket_address:\n address: 0.0.0.0\n port_value: 8000\n filter_chains:\n - transport_socket:\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n common_tls_context:\n tls_certificates:\n - certificate_chain: {filename: \"/etc/ssl/certs/talker-api/tls.crt\"}\n private_key: {filename: \"/etc/ssl/certs/talker-api/tls.key\"}\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/talker-api/tls.crt\n filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n stat_prefix: local\n route_config:\n name: local_route\n virtual_hosts:\n - name: local_service\n domains: ['*']\n routes:\n - match: { prefix: / }\n route: { cluster: talker-api }\n http_filters:\n - name: envoy.filters.http.ext_authz\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false\n include_peer_certificate: true\n grpc_service:\n envoy_grpc: { cluster_name: authorino }\n timeout: 1s\n - name: envoy.filters.http.router\n typed_config: {}\n use_remote_address: true\n clusters:\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization\n port_value: 50051\n transport_socket:\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n common_tls_context:\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/authorino-ca-cert.crt\n - name: talker-api\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: talker-api\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: talker-api\n port_value: 3000\n admin:\n access_log_path: \"/tmp/admin_access.log\"\n address:\n socket_address:\n address: 0.0.0.0\n port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: envoy\n name: envoy\nspec:\n selector:\n matchLabels:\n app: envoy\n template:\n metadata:\n labels:\n app: envoy\n spec:\n containers:\n - args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n command:\n - /usr/local/bin/envoy\n image: envoyproxy/envoy:v1.19-latest\n name: envoy\n ports:\n - containerPort: 8000\n name: web\n - containerPort: 8001\n name: admin\n volumeMounts:\n - mountPath: /usr/local/etc/envoy\n name: config\n readOnly: true\n - mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n name: authorino-ca-cert\n readOnly: true\n subPath: ca.crt\n - mountPath: /etc/ssl/certs/talker-api\n name: talker-api-ca\n readOnly: true\n volumes:\n - configMap:\n items:\n - key: envoy.yaml\n path: envoy.yaml\n name: envoy\n name: config\n - name: authorino-ca-cert\n secret:\n defaultMode: 420\n secretName: authorino-ca-cert\n - name: talker-api-ca\n secret:\n defaultMode: 420\n secretName: talker-api-ca\n---\napiVersion: v1\nkind: 
Service\nmetadata:\n name: envoy\nspec:\n selector:\n app: envoy\n ports:\n - name: web\n port: 8000\n protocol: TCP\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n name: ingress-wildcard-host\nspec:\n rules:\n - host: talker-api.127.0.0.1.nip.io\n http:\n paths:\n - backend:\n service:\n name: envoy\n port: { number: 8000 }\n path: /\n pathType: Prefix\nEOF\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8443 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8443:8443 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#create-the-authconfig","title":"\u277b Create the AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"mtls\":\n x509:\n selector:\n matchLabels:\n app: talker-api\n authorization:\n \"acme\":\n patternMatching:\n patterns:\n - selector: auth.identity.Organization\n operator: incl\n value: ACME Inc.\nEOF\n
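The auth.identity object resolved for a validated certificate carries the parsed subject of the client certificate, which is what the acme policy inspects. An illustrative shape for Aisha's certificate (note that Organization is an array, hence the incl operator):
{\n  \"CommonName\": \"aisha\",\n  \"Country\": [\"PK\"],\n  \"Locality\": [\"Islamabad\"],\n  \"Organization\": [\"ACME Inc.\"],\n  \"OrganizationalUnit\": [\"Engineering\"]\n}\n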
"},{"location":"authorino/docs/user-guides/mtls-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a TLS certificate signed by the trusted CA:
openssl genrsa -out /tmp/aisha.key 4096\nopenssl req -new -subj \"/CN=aisha/C=PK/L=Islamabad/O=ACME Inc./OU=Engineering\" -key /tmp/aisha.key -out /tmp/aisha.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt\n\ncurl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 200 OK\n
With a TLS certificate signed by the trusted CA, though missing an authorized Organization:
openssl genrsa -out /tmp/john.key 4096\nopenssl req -new -subj \"/CN=john/C=UK/L=London\" -key /tmp/john.key -out /tmp/john.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt\n\ncurl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#try-the-authconfig-via-raw-http-authorization-interface","title":"\u277d Try the AuthConfig via raw HTTP authorization interface","text":"Expose Authorino's raw HTTP authorization to the local host:
kubectl port-forward service/authorino-authorino-authorization 5001:5001 &\n
With a TLS certificate signed by the trusted CA:
curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 200\n
With a TLS certificate signed by an unknown authority:
openssl req -x509 -sha512 -nodes \\\n -days 365 \\\n -newkey rsa:4096 \\\n -subj \"/CN=untrusted\" \\\n -addext basicConstraints=CA:TRUE \\\n -addext keyUsage=digitalSignature,keyCertSign \\\n -keyout /tmp/untrusted-ca.key \\\n -out /tmp/untrusted-ca.crt\n\nopenssl genrsa -out /tmp/niko.key 4096\nopenssl req -new -subj \"/CN=niko/C=JP/L=Osaka\" -key /tmp/niko.key -out /tmp/niko.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt\n\ncurl -k --cert /tmp/niko.crt --key /tmp/niko.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 401\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
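The same failure can be reproduced offline: verifying niko's certificate against the trusted CA fails, which is essentially the check Authorino performs (exact error text varies with the OpenSSL version):

openssl verify -CAfile /tmp/ca.crt /tmp/niko.crt
# error 20 at 0 depth lookup: unable to get local issuer certificate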
"},{"location":"authorino/docs/user-guides/mtls-authentication/#revoke-an-entire-chain-of-certificates","title":"\u277e Revoke an entire chain of certificates","text":"kubectl delete secret/talker-api-ca\n
Even if the deleted root certificate is still cached and accepted at the gateway, Authorino will revoke access at the application level immediately.
Try with a previously accepted certificate:
curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#cleanup","title":"Cleanup","text":"kind delete cluster --name authorino-tutorial\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/","title":"User guide: OAuth 2.0 token introspection (RFC 7662)","text":"Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 OAuth 2.0 introspection
- Authorization \u2192 Pattern-matching authorization
Authorino can perform OAuth 2.0 token introspection (RFC 7662) on the access tokens supplied in the requests to protected APIs. This is particularly useful when using opaque tokens, to remotely check token validity and resolve the identity object.
Important! Authorino does not implement OAuth2 grants or OIDC authentication flows. As a general good practice, obtaining and refreshing access tokens is something clients should negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens, using the parameters provided by the trusted issuer authorities.
Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- OAuth 2.0 server that implements the token introspection endpoint (RFC 7662) (e.g. Keycloak or a12n-server)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy a Keycloak server preloaded with the realm settings required for this guide:
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Deploy an a12n-server server preloaded with all settings required for this guide:
kubectl create namespace a12n-server\nkubectl -n a12n-server apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec matches that of the AuthPolicy's, except spec.hosts, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and from the route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create the required secrets that will be used by Authorino to authenticate with Keycloak and a12n-server during the introspection request:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: oauth2-token-introspection-credentials-keycloak\nstringData:\n clientID: talker-api\n clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: oauth2-token-introspection-credentials-a12n-server\nstringData:\n clientID: talker-api\n clientSecret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g\ntype: Opaque\nEOF\n
Create the Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak\":\n oauth2Introspection:\n endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token/introspect\n tokenTypeHint: requesting_party_token\n credentialsRef:\n name: oauth2-token-introspection-credentials-keycloak\n \"a12n-server\":\n oauth2Introspection:\n endpoint: http://a12n-server.a12n-server.svc.cluster.local:8531/introspect\n credentialsRef:\n name: oauth2-token-introspection-credentials-a12n-server\n authorization:\n \"can-read\":\n when:\n - selector: auth.identity.privileges\n operator: neq\n value: \"\"\n patternMatching:\n patterns:\n - selector: auth.identity.privileges.talker-api\n operator: incl\n value: read\nEOF\n
On every request, Authorino will try to verify the token remotely with the Keycloak server and the a12n-server server.
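Under the hood, each of these verifications is a standard RFC 7662 introspection request. Once you have obtained an access token (next step), a rough equivalent of the call Authorino makes to Keycloak, using the client credentials from the Secret created above, looks like this (illustrative sketch; run it from a pod with access to the in-cluster DNS name):

curl -u talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88 -d "token=$ACCESS_TOKEN" -d 'token_type_hint=requesting_party_token' http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token/introspect
# {"active":true, ...} for a valid token; {"active":false} otherwise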
For authorization, whenever the introspected token data includes a privileges property (returned by a12n-server), Authorino will enforce that only consumers whose privileges.talker-api includes the "read" permission are granted access.
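For the rule above to grant access, the introspected token data must therefore carry a privileges object along these lines (an illustrative sketch of an a12n-server introspection response; the exact shape depends on your a12n-server configuration):

{
  "active": true,
  "privileges": {
    "talker-api": ["read"]
  }
}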
Check out the docs for information about the common feature Conditions about skipping parts of an AuthConfig
in the auth pipeline based on context.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-keycloak-and-consume-the-api","title":"Obtain an access token with Keycloak and consume the API","text":"Obtain an access token with the Keycloak server for user Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is reachable from within the cluster as well.
As user Jane, consume the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-a12n-server-and-consume-the-api","title":"Obtain an access token with a12n-server and consume the API","text":"Forward local requests to a12n-server instance running in the cluster:
kubectl -n a12n-server port-forward deployment/a12n-server 8531:8531 2>&1 >/dev/null &\n
Obtain an access token with the a12n-server server for service account service-account-1
:
ACCESS_TOKEN=$(curl -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/token\" | jq -r .access_token)\n
You can also obtain an access token from within the cluster, in case your a12n-server is not reachable from the outside:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://a12n-server.a12n-server.svc.cluster.local:8531/token -s -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s | jq -r .access_token)\n
Verify the issued token is an opaque access token in this case:
echo $ACCESS_TOKEN\n
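Unlike the JWTs issued by Keycloak, this token carries no decodable claims. A quick heuristic check: a JWT has exactly two dots separating header, payload and signature, which an opaque token typically lacks:

echo $ACCESS_TOKEN | awk -F. '{print NF-1}'
# 2 would indicate a JWT; any other count suggests an opaque token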
As service-account-1
, consume the API with a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
curl -d \"token=$ACCESS_TOKEN\" -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/revoke\" -i\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#consume-the-api-with-a-missing-or-invalid-access-token","title":"Consume the API with a missing or invalid access token","text":"curl -H \"Authorization: Bearer invalid\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete secret/oauth2-token-introspection-credentials-keycloak\nkubectl delete secret/oauth2-token-introspection-credentials-a12n-server\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\nkubectl delete namespace a12n-server\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/observability/","title":"Observability","text":""},{"location":"authorino/docs/user-guides/observability/#metrics","title":"Metrics","text":"Authorino exports metrics at 2 endpoints:
/metrics: Metrics of the controller-runtime about reconciliation (caching) of AuthConfigs and API key Secrets
/server-metrics: Metrics of the external authorization gRPC and OIDC/Festival Wristband validation built-in HTTP servers
The Authorino Operator creates a Kubernetes Service named <authorino-cr-name>-controller-metrics that exposes the endpoints on port 8080. The Authorino instance lets you modify the port number of the metrics endpoints by setting the --metrics-addr command-line flag (default: :8080).
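For example, assuming an Authorino CR named authorino, as deployed earlier in this guide, you can port-forward the metrics Service and scrape both endpoints locally:

kubectl port-forward service/authorino-controller-metrics 8080:8080 2>&1 >/dev/null &

curl http://localhost:8080/metrics
curl http://localhost:8080/server-metrics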
Main metrics exported by endpoint1:
Endpoint: /metrics

- controller_runtime_reconcile_total (counter): Total number of reconciliations per controller. Labels: controller=authconfig|secret, result=success|error|requeue
- controller_runtime_reconcile_errors_total (counter): Total number of reconciliation errors per controller. Labels: controller=authconfig|secret
- controller_runtime_reconcile_time_seconds (histogram): Length of time per reconciliation per controller. Labels: controller=authconfig|secret
- controller_runtime_max_concurrent_reconciles (gauge): Maximum number of concurrent reconciles per controller. Labels: controller=authconfig|secret
- workqueue_adds_total (counter): Total number of adds handled by workqueue. Labels: name=authconfig|secret
- workqueue_depth (gauge): Current depth of workqueue. Labels: name=authconfig|secret
- workqueue_queue_duration_seconds (histogram): How long in seconds an item stays in workqueue before being requested. Labels: name=authconfig|secret
- workqueue_longest_running_processor_seconds (gauge): How many seconds the longest running processor for workqueue has been running. Labels: name=authconfig|secret
- workqueue_retries_total (counter): Total number of retries handled by workqueue. Labels: name=authconfig|secret
- workqueue_unfinished_work_seconds (gauge): How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Labels: name=authconfig|secret
- workqueue_work_duration_seconds (histogram): How long in seconds processing an item from workqueue takes. Labels: name=authconfig|secret
- rest_client_requests_total (counter): Number of HTTP requests, partitioned by status code, method, and host. Labels: code=200|404, method=GET|PUT|POST

Endpoint: /server-metrics

- auth_server_evaluator_total2 (counter): Total number of evaluations of individual authconfig rule performed by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_cancelled2 (counter): Number of evaluations of individual authconfig rule cancelled by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_ignored2 (counter): Number of evaluations of individual authconfig rule ignored by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_denied2 (counter): Number of denials from individual authconfig rule evaluated by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_duration_seconds2 (histogram): Response latency of individual authconfig rule evaluated by the auth server (in seconds). Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_authconfig_total (counter): Total number of authconfigs enforced by the auth server, partitioned by authconfig. Labels: namespace, authconfig
- auth_server_authconfig_response_status (counter): Response status of authconfigs sent by the auth server, partitioned by authconfig. Labels: namespace, authconfig, status=OK|UNAUTHENTICATED|PERMISSION_DENIED
- auth_server_authconfig_duration_seconds (histogram): Response latency of authconfig enforced by the auth server (in seconds). Labels: namespace, authconfig
- auth_server_response_status (counter): Response status of authconfigs sent by the auth server. Labels: status=OK|UNAUTHENTICATED|PERMISSION_DENIED|NOT_FOUND
- grpc_server_handled_total (counter): Total number of RPCs completed on the server, regardless of success or failure. Labels: grpc_code=OK|Aborted|Canceled|DeadlineExceeded|Internal|ResourceExhausted|Unknown, grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_handling_seconds (histogram): Response latency (seconds) of gRPC that had been application-level handled by the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_msg_received_total (counter): Total number of RPC stream messages received on the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_msg_sent_total (counter): Total number of gRPC stream messages sent by the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_started_total (counter): Total number of RPCs started on the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- http_server_handled_total (counter): Total number of calls completed on the raw HTTP authorization server, regardless of success or failure. Labels: http_code
- http_server_handling_seconds (histogram): Response latency (seconds) of raw HTTP authorization request that had been application-level handled by the server.
- oidc_server_requests_total (counter): Number of get requests received on the OIDC (Festival Wristband) server. Labels: namespace, authconfig, wristband, path=oidc-config|jwks
- oidc_server_response_status (counter): Status of HTTP response sent by the OIDC (Festival Wristband) server. Labels: status=200|404

1 Both endpoints export metrics about the Go runtime, such as number of goroutines (go_goroutines) and threads (go_threads), usage of CPU, memory and GC stats.
2 Opt-in metrics: auth_server_evaluator_* metrics require authconfig.spec.(identity|metadata|authorization|response).metrics: true (default: false). This can be enforced for the entire instance (all AuthConfigs and evaluators) by setting the --deep-metrics-enabled command-line flag in the Authorino deployment.
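If you deploy via the Operator, a minimal sketch for enabling deep metrics instance-wide follows; the spec.metrics.deepMetricsEnabled field name is assumed from the Authorino CRD and should be verified against your Operator version:

kubectl apply -f -<<EOF
apiVersion: operator.authorino.kuadrant.io/v1beta1
kind: Authorino
metadata:
  name: authorino
spec:
  metrics:
    deepMetricsEnabled: true  # assumed CRD field, mapping to the --deep-metrics-enabled flag
EOF

Once scraped by Prometheus, these metrics support queries such as the 95th-percentile enforcement latency per AuthConfig (illustrative PromQL):

histogram_quantile(0.95, sum by (authconfig, le) (rate(auth_server_authconfig_duration_seconds_bucket[5m])))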
Example of metrics exported at the /metrics
endpoint # HELP controller_runtime_active_workers Number of currently used workers per controller\n# TYPE controller_runtime_active_workers gauge\ncontroller_runtime_active_workers{controller=\"authconfig\"} 0\ncontroller_runtime_active_workers{controller=\"secret\"} 0\n# HELP controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller\n# TYPE controller_runtime_max_concurrent_reconciles gauge\ncontroller_runtime_max_concurrent_reconciles{controller=\"authconfig\"} 1\ncontroller_runtime_max_concurrent_reconciles{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller\n# TYPE controller_runtime_reconcile_errors_total counter\ncontroller_runtime_reconcile_errors_total{controller=\"authconfig\"} 12\ncontroller_runtime_reconcile_errors_total{controller=\"secret\"} 0\n# HELP controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller\n# TYPE controller_runtime_reconcile_time_seconds histogram\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.01\"} 11\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.025\"} 17\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.05\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.15\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.35\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.45\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.6\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.7\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.8\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.9\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.75\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4.5\"} 
18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"5\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"6\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"7\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"8\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"9\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"10\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"15\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"20\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"25\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"30\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"40\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"50\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"60\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"+Inf\"} 19\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"authconfig\"} 5.171108321999999\ncontroller_runtime_reconcile_time_seconds_count{controller=\"authconfig\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.01\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.025\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.05\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.15\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.3\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.35\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.4\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.45\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.75\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3\"} 
1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"10\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"15\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"20\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"30\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"40\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"50\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"60\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"+Inf\"} 1\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"secret\"} 0.000138025\ncontroller_runtime_reconcile_time_seconds_count{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_total Total number of reconciliations per controller\n# TYPE controller_runtime_reconcile_total counter\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"error\"} 12\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"success\"} 7\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"error\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"success\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 13\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 13\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000140699\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000313162\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003671076\ngo_gc_duration_seconds_count 13\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6357\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 45065\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 128306\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 128327\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.5021512e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 128327\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.5021512e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 128327\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3885\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 33418\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 96417\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 96425\ngo_gc_heap_frees_by_size_bytes_total_sum 9.880944e+06\ngo_gc_heap_frees_by_size_bytes_total_count 96425\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.880944e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 96425\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.356624e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31902\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 11750\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 26\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 26\ngo_gc_pauses_seconds_total_sum 0.003151488\ngo_gc_pauses_seconds_total_count 26\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 80\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 589824\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.140568e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 4.005888e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.0602e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 17104\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 113968\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the 
underlying operating system.\n# TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.140568e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.5021512e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 108175\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.140568e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.595712e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.200768e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31902\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 4.005888e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461572121033354e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 140077\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 2400\n# HELP 
go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 113968\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.356624e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 80\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 244\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 244\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 2336\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 2336\ngo_sched_latencies_seconds_sum 0.18509832400000004\ngo_sched_latencies_seconds_count 2336\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.84\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.3728896e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.\n# TYPE rest_client_requests_total counter\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"GET\"} 114\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"PUT\"} 4\n# HELP workqueue_adds_total Total 
number of adds handled by workqueue\n# TYPE workqueue_adds_total counter\nworkqueue_adds_total{name=\"authconfig\"} 19\nworkqueue_adds_total{name=\"secret\"} 1\n# HELP workqueue_depth Current depth of workqueue\n# TYPE workqueue_depth gauge\nworkqueue_depth{name=\"authconfig\"} 0\nworkqueue_depth{name=\"secret\"} 0\n# HELP workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running.\n# TYPE workqueue_longest_running_processor_seconds gauge\nworkqueue_longest_running_processor_seconds{name=\"authconfig\"} 0\nworkqueue_longest_running_processor_seconds{name=\"secret\"} 0\n# HELP workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested\n# TYPE workqueue_queue_duration_seconds histogram\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 8\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_queue_duration_seconds_sum{name=\"authconfig\"} 4.969016371\nworkqueue_queue_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_queue_duration_seconds_sum{name=\"secret\"} 4.67e-06\nworkqueue_queue_duration_seconds_count{name=\"secret\"} 1\n# HELP workqueue_retries_total Total number of retries handled by workqueue\n# TYPE workqueue_retries_total counter\nworkqueue_retries_total{name=\"authconfig\"} 12\nworkqueue_retries_total{name=\"secret\"} 0\n# HELP workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. 
One can deduce the number of stuck threads by observing the rate at which this increases.\n# TYPE workqueue_unfinished_work_seconds gauge\nworkqueue_unfinished_work_seconds{name=\"authconfig\"} 0\nworkqueue_unfinished_work_seconds{name=\"secret\"} 0\n# HELP workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes.\n# TYPE workqueue_work_duration_seconds histogram\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 11\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_work_duration_seconds_sum{name=\"authconfig\"} 5.171738079000001\nworkqueue_work_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_work_duration_seconds_sum{name=\"secret\"} 0.000150956\nworkqueue_work_duration_seconds_count{name=\"secret\"} 1\n
Example of metrics exported at the /server-metrics
endpoint # HELP auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds).\n# TYPE auth_server_authconfig_duration_seconds histogram\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.051000000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.101\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.15100000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.201\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.251\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.301\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.351\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.40099999999999997\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.45099999999999996\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.501\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.551\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6010000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6510000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7010000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7510000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8010000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8510000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9010000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9510000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"+Inf\"} 1\nauth_server_authconfig_duration_seconds_sum{authconfig=\"edge-auth\",namespace=\"authorino\"} 0.001701795\nauth_server_authconfig_duration_seconds_count{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.051000000000000004\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.101\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.15100000000000002\"} 
5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.201\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.251\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.301\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.351\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.40099999999999997\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.45099999999999996\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.501\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.551\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6010000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6510000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7010000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7510000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8010000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8510000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9010000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9510000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"+Inf\"} 5\nauth_server_authconfig_duration_seconds_sum{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 0.26967658299999997\nauth_server_authconfig_duration_seconds_count{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# HELP auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_response_status counter\nauth_server_authconfig_response_status{authconfig=\"edge-auth\",namespace=\"authorino\",status=\"OK\"} 1\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"OK\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"PERMISSION_DENIED\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"UNAUTHENTICATED\"} 1\n# HELP auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_total counter\nauth_server_authconfig_total{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_total{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# HELP 
auth_server_evaluator_duration_seconds Response latency of individual authconfig rule evaluated by the auth server (in seconds).\n# TYPE auth_server_evaluator_duration_seconds histogram\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.051000000000000004\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.101\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.15100000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.201\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.251\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.301\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.351\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.40099999999999997\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.45099999999999996\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.501\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.551\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6010000000000001\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6510000000000001\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7010000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7510000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8010000000000003\"} 
4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8510000000000003\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9010000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9510000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"+Inf\"} 4\nauth_server_evaluator_duration_seconds_sum{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 0.25800055\nauth_server_evaluator_duration_seconds_count{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_evaluator_total Total number of evaluations of individual authconfig rule performed by the auth server.\n# TYPE auth_server_evaluator_total counter\nauth_server_evaluator_total{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_response_status Response status of authconfigs sent by the auth server.\n# TYPE auth_server_response_status counter\nauth_server_response_status{status=\"NOT_FOUND\"} 1\nauth_server_response_status{status=\"OK\"} 3\nauth_server_response_status{status=\"PERMISSION_DENIED\"} 2\nauth_server_response_status{status=\"UNAUTHENTICATED\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 11\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 11\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000158594\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000324091\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003546711\ngo_gc_duration_seconds_count 11\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6261\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 42477\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 122133\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 122154\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.455944e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 122154\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.455944e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 122154\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3789\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 31067\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 91013\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 91021\ngo_gc_heap_frees_by_size_bytes_total_sum 9.399936e+06\ngo_gc_heap_frees_by_size_bytes_total_count 91021\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.399936e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 91021\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.601744e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31133\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 9866\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 22\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 22\ngo_gc_pauses_seconds_total_sum 0.0030393599999999996\ngo_gc_pauses_seconds_total_count 22\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 79\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 630784\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.159504e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 3.858432e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.14776e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 16696\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 114376\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory 
allocated by the underlying operating system.\n# TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.159504e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.455944e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 100887\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.159504e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.489216e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.307264e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31133\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 3.858432e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461569717723043e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 132020\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 
2400\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 114376\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.601744e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 79\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 225\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 225\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 1916\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 1916\ngo_sched_latencies_seconds_sum 0.18081453600000003\ngo_sched_latencies_seconds_count 1916\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure.\n# TYPE grpc_server_handled_total counter\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_handling_seconds Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.\n# TYPE grpc_server_handling_seconds histogram\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.005\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.01\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.025\"} 
3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.05\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.1\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.25\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"1\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"2.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"10\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"+Inf\"} 7\ngrpc_server_handling_seconds_sum{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0.277605516\ngrpc_server_handling_seconds_count{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\n# HELP grpc_server_msg_received_total Total number of RPC stream messages received on the server.\n# TYPE grpc_server_msg_received_total counter\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_received_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server.\n# TYPE grpc_server_msg_sent_total counter\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_sent_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_started_total Total number of RPCs started on the server.\n# TYPE grpc_server_started_total counter\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_started_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server.\n# TYPE oidc_server_requests_total counter\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-configuration\",wristband=\"wristband\"} 1\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-connect/certs\",wristband=\"wristband\"} 1\n# HELP oidc_server_response_status 
Status of HTTP response sent by the OIDC (Festival Wristband) server.\n# TYPE oidc_server_response_status counter\noidc_server_response_status{status=\"200\"} 2\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.42\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.370432e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n# TYPE promhttp_metric_handler_requests_in_flight gauge\npromhttp_metric_handler_requests_in_flight 1\n# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n# TYPE promhttp_metric_handler_requests_total counter\npromhttp_metric_handler_requests_total{code=\"200\"} 1\npromhttp_metric_handler_requests_total{code=\"500\"} 0\npromhttp_metric_handler_requests_total{code=\"503\"} 0\n
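To eyeball these metrics without a Prometheus stack, one can port-forward the metrics port of the Authorino deployment and scrape the endpoints directly. A minimal sketch, assuming the default --metrics-addr of :8080, that the reconciliation metrics shown earlier are served at /metrics and the auth server ones at /server-metrics, and an illustrative deployment name (adjust to your installation):

```sh
# Forward the Authorino metrics port to localhost (deployment name is illustrative)
kubectl port-forward deployment/authorino 8080:8080 2>&1 >/dev/null &

# Reconciliation (controller) metrics
curl -s http://localhost:8080/metrics | grep '^workqueue_'

# Auth server metrics, e.g. per-AuthConfig response statuses
curl -s http://localhost:8080/server-metrics | grep '^auth_server_authconfig_response_status'
```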
"},{"location":"authorino/docs/user-guides/observability/#readiness-check","title":"Readiness check","text":"Authorino exposes two main endpoints for health and readiness check of the AuthConfig controller:
/healthz
: Health probe (ping) \u2013 reports \"ok\" if the controller is healthy. /readyz
: Readiness probe \u2013 reports \"ok\" if the controller is ready to reconcile AuthConfig-related events.
In general, the endpoints return either 200
(\"ok\", i.e. all checks have passed) or 500
(when one or more checks failed).
The default binding network address is :8081
, which can be changed by setting the command-line flag --health-probe-addr
.
The following additional subpath is available and its corresponding check can be aggregated into the response from the main readiness probe:
/readyz/authconfigs
: Aggregated readiness status of the AuthConfigs \u2013 reports \"ok\" if all AuthConfigs watched by the reconciler have been marked as ready.
Important! The AuthConfig readiness check within the scope of the aggregated readiness probe endpoint is deactivated by default \u2013 i.e. it is an opt-in check. Sending a request to the /readyz endpoint without explicitly opting in for the AuthConfigs check, by using the include parameter, will result in a response message that disregards the actual status of the watched AuthConfigs, possibly an \"ok\" message. To read the aggregated status of the watched AuthConfigs, either use the specific endpoint /readyz/authconfigs or opt in for the check in the aggregated endpoint by sending a request to /readyz?include=authconfigs.
Apart from include
to add the aggregated status of the AuthConfigs, the following additional query string parameters are available:
verbose=true|false \u2013 provides more verbose response messages; exclude=(check name) \u2013 excludes a particular readiness check (for future usage).
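A quick way to exercise these probes outside the cluster is to port-forward the health probe port and hit the endpoints with curl. A minimal sketch, assuming the default --health-probe-addr of :8081 and an illustrative deployment name:

```sh
# Forward the health probe port to localhost (deployment name is illustrative)
kubectl port-forward deployment/authorino 8081:8081 2>&1 >/dev/null &

curl -s "http://localhost:8081/healthz"                                  # health probe (ping)
curl -s "http://localhost:8081/readyz"                                   # readiness, AuthConfigs check not included
curl -s "http://localhost:8081/readyz?include=authconfigs&verbose=true"  # opting in for the AuthConfigs check, verbose
curl -s "http://localhost:8081/readyz/authconfigs"                       # aggregated AuthConfigs status alone
```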
"},{"location":"authorino/docs/user-guides/observability/#logging","title":"Logging","text":"Authorino provides structured log messages (\"production\") or more log messages output to stdout in a more user-friendly format (\"development\" mode) and different level of logging.
"},{"location":"authorino/docs/user-guides/observability/#log-levels-and-log-modes","title":"Log levels and log modes","text":"Authorino outputs 3 levels of log messages: (from lowest to highest level)
debug
info (default)
error
info
logging is restricted to high-level information about the gRPC and HTTP authorization services: incoming request and corresponding outgoing response logs, with reduced detail about the associated objects (request payload and authorization result), and without detailed logs of the steps in between, except for errors.
Only debug
logging includes the processing details of each Auth Pipeline, such as intermediate requests to validate identities with external auth servers, and requests to external sources of auth metadata or authorization policies.
To configure the desired log level, set the spec.logLevel
field of the Authorino
custom resource (or --log-level
command-line flag in the Authorino deployment), to one of the supported values listed above. Default log level is info
.
Apart from log level, Authorino can output messages to the logs in 2 different formats:
production
(default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":number, \"msg\":string, \"logger\":string, extra values...}
development
: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}
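For illustration only, the same hypothetical message rendered in each mode, with made-up values following the formats above:

```
# production (one JSON object per line)
{"level":"info","ts":1669220527.98,"logger":"authorino","msg":"starting grpc auth service","port":50051,"tls":true}

# development (tab-separated, extra values as JSON)
2022-11-23T16:22:07.982Z	INFO	authorino	starting grpc auth service	{"port": 50051, "tls": true}
```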
To configure the desired log mode, set the spec.logMode
field of the Authorino
custom resource (or --log-mode
command-line flag in the Authorino deployment), to one of the supported values listed above. Default log mode is production
.
Example of Authorino
custom resource with log level debug
and log mode production
:
apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n logLevel: debug\n logMode: production\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\n
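Because logLevel and logMode are plain fields of the Authorino custom resource, they can also be changed on a live instance with a merge patch, leaving it to the Authorino Operator to reconcile the deployment. A sketch, assuming the resource is named authorino as in the example above:

```sh
kubectl patch authorino/authorino --type=merge -p '{"spec":{"logLevel":"debug","logMode":"production"}}'
```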
"},{"location":"authorino/docs/user-guides/observability/#sensitive-data-output-to-the-logs","title":"Sensitive data output to the logs","text":"Authorino will never output HTTP headers and query string parameters to info
log messages, as such values usually include sensitive data (e.g. access tokens, API keys and Authorino Festival Wristbands). However, debug
log messages may include such sensitive information, and those values are not redacted.
Therefore, DO NOT USE debug LOG LEVEL IN PRODUCTION! Instead, use either info or error.
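A practical upside of sticking to production mode is that the structured output stays easy to query. For example, a minimal jq sketch to surface only error-level entries (deployment name is illustrative):

```sh
# fromjson? silently skips any line that is not valid JSON
kubectl logs deployment/authorino --tail=-1 | jq -R 'fromjson? | select(.level == "error")'
```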
"},{"location":"authorino/docs/user-guides/observability/#log-messages-printed-by-authorino","title":"Log messages printed by Authorino","text":"Some log messages printed by Authorino and corresponding extra values included:
| logger | level | message | extra values |
|--------|-------|---------|--------------|
| authorino | info | \"setting instance base logger\" | min level=info\|debug, mode=production\|development |
| authorino | info | \"booting up authorino\" | version |
| authorino | debug | \"setting up with options\" | auth-config-label-selector, deep-metrics-enabled, enable-leader-election, evaluator-cache-size, ext-auth-grpc-port, ext-auth-http-port, health-probe-addr, log-level, log-mode, max-http-request-body-size, metrics-addr, oidc-http-port, oidc-tls-cert, oidc-tls-cert-key, secret-label-selector, timeout, tls-cert, tls-cert-key, watch-namespace |
| authorino | info | \"attempting to acquire leader lease <namespace>/cb88a58a.authorino.kuadrant.io...\\n\" | |
| authorino | info | \"successfully acquired lease <namespace>/cb88a58a.authorino.kuadrant.io\\n\" | |
| authorino | info | \"disabling grpc auth service\" | |
| authorino | info | \"starting grpc auth service\" | port, tls |
| authorino | error | \"failed to obtain port for the grpc auth service\" | |
| authorino | error | \"failed to load tls cert for the grpc auth\" | |
| authorino | error | \"failed to start grpc auth service\" | |
| authorino | info | \"disabling http auth service\" | |
| authorino | info | \"starting http auth service\" | port, tls |
| authorino | error | \"failed to obtain port for the http auth service\" | |
| authorino | error | \"failed to start http auth service\" | |
| authorino | info | \"disabling http oidc service\" | |
| authorino | info | \"starting http oidc service\" | port, tls |
| authorino | error | \"failed to obtain port for the http oidc service\" | |
| authorino | error | \"failed to start http oidc service\" | |
| authorino | info | \"starting manager\" | |
| authorino | error | \"unable to start manager\" | |
| authorino | error | \"unable to create controller\" | controller=authconfig\|secret\|authconfigstatusupdate |
| authorino | error | \"problem running manager\" | |
| authorino | info | \"starting status update manager\" | |
| authorino | error | \"unable to start status update manager\" | |
| authorino | error | \"problem running status update manager\" | |
| authorino.controller-runtime.metrics | info | \"metrics server is starting to listen\" | addr |
| authorino.controller-runtime.manager | info | \"starting metrics server\" | path |
| authorino.controller-runtime.manager.events | debug | \"Normal\" | object={kind=ConfigMap, apiVersion=v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" |
| authorino.controller-runtime.manager.events | debug | \"Normal\" | object={kind=Lease, apiVersion=coordination.k8s.io/v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" |
| authorino.controller-runtime.manager.controller.authconfig | info | \"resource reconciled\" | authconfig |
| authorino.controller-runtime.manager.controller.authconfig | info | \"host already taken\" | authconfig, host |
| authorino.controller-runtime.manager.controller.authconfig.statusupdater | debug | \"resource status did not change\" | authconfig |
| authorino.controller-runtime.manager.controller.authconfig.statusupdater | debug | \"resource status changed\" | authconfig, authconfig/status |
| authorino.controller-runtime.manager.controller.authconfig.statusupdater | error | \"failed to update the resource\" | authconfig |
| authorino.controller-runtime.manager.controller.authconfig.statusupdater | info | \"resource status updated\" | authconfig |
| authorino.controller-runtime.manager.controller.secret | info | \"resource reconciled\" | |
| authorino.controller-runtime.manager.controller.secret | info | \"could not reconcile authconfigs using api key authentication\" | |
| authorino.service.oidc | info | \"request received\" | request id, url, realm, config, path |
| authorino.service.oidc | info | \"response sent\" | request id |
| authorino.service.oidc | error | \"failed to serve oidc request\" | |
| authorino.service.auth | info | \"incoming authorization request\" | request id, object |
| authorino.service.auth | debug | \"incoming authorization request\" | request id, object |
| authorino.service.auth | info | \"outgoing authorization response\" | request id, authorized, response, object |
| authorino.service.auth | debug | \"outgoing authorization response\" | request id, authorized, response, object |
| authorino.service.auth | error | \"failed to create dynamic metadata\" | request id, object |
| authorino.service.auth.authpipeline | debug | \"skipping config\" | request id, config, reason |
| authorino.service.auth.authpipeline.identity | debug | \"identity validated\" | request id, config, object |
| authorino.service.auth.authpipeline.identity | debug | \"cannot validate identity\" | request id, config, reason |
| authorino.service.auth.authpipeline.identity | error | \"failed to extend identity object\" | request id, config, object |
| authorino.service.auth.authpipeline.identity.oidc | error | \"failed to discovery openid connect configuration\" | endpoint |
| authorino.service.auth.authpipeline.identity.oidc | debug | \"auto-refresh of openid connect configuration disabled\" | endpoint, reason |
| authorino.service.auth.authpipeline.identity.oidc | debug | \"openid connect configuration updated\" | endpoint |
| authorino.service.auth.authpipeline.identity.oauth2 | debug | \"sending token introspection request\" | request id, url, data |
| authorino.service.auth.authpipeline.identity.kubernetesauth | debug | \"calling kubernetes token review api\" | request id, tokenreview |
| authorino.service.auth.authpipeline.identity.apikey | error | \"Something went wrong fetching the authorized credentials\" | |
| authorino.service.auth.authpipeline.metadata | debug | \"fetched auth metadata\" | request id, config, object |
| authorino.service.auth.authpipeline.metadata | debug | \"cannot fetch metadata\" | request id, config, reason |
| authorino.service.auth.authpipeline.metadata.http | debug | \"sending request\" | request id, method, url, headers |
| authorino.service.auth.authpipeline.metadata.userinfo | debug | \"fetching user info\" | request id, endpoint |
| authorino.service.auth.authpipeline.metadata.uma | debug | \"requesting pat\" | request id, url, data, headers |
| authorino.service.auth.authpipeline.metadata.uma | debug | \"querying resources by uri\" | request id, url |
| authorino.service.auth.authpipeline.metadata.uma | debug | \"getting resource data\" | request id, url |
| authorino.service.auth.authpipeline.authorization | debug | \"evaluating for input\" | request id, input |
| authorino.service.auth.authpipeline.authorization | debug | \"access granted\" | request id, config, object |
| authorino.service.auth.authpipeline.authorization | debug | \"access denied\" | request id, config, reason |
| authorino.service.auth.authpipeline.authorization.opa | error | \"invalid response from policy evaluation\" | policy |
| authorino.service.auth.authpipeline.authorization.opa | error | \"failed to precompile policy\" | policy |
| authorino.service.auth.authpipeline.authorization.opa | error | \"failed to download policy from external registry\" | policy, endpoint |
| authorino.service.auth.authpipeline.authorization.opa | error | \"failed to refresh policy from external registry\" | policy, endpoint |
| authorino.service.auth.authpipeline.authorization.opa | debug | \"external policy unchanged\" | policy, endpoint |
| authorino.service.auth.authpipeline.authorization.opa | debug | \"auto-refresh of external policy disabled\" | policy, endpoint, reason |
| authorino.service.auth.authpipeline.authorization.opa | info | \"policy updated from external registry\" | policy, endpoint |
| authorino.service.auth.authpipeline.authorization.kubernetesauthz | debug | \"calling kubernetes subject access review api\" | request id, subjectaccessreview |
| authorino.service.auth.authpipeline.response | debug | \"dynamic response built\" | request id, config, object |
| authorino.service.auth.authpipeline.response | debug | \"cannot build dynamic response\" | request id, config, reason |
| authorino.service.auth.http | debug | \"bad request\" | request id |
| authorino.service.auth.http | debug | \"not found\" | request id |
| authorino.service.auth.http | debug | \"request body too large\" | request id |
| authorino.service.auth.http | debug | \"service unavailable\" | request id |
"},{"location":"authorino/docs/user-guides/observability/#examples","title":"Examples","text":"The examples below are all with --log-level=debug
and --log-mode=production
.
Booting up the service {\"level\":\"info\",\"ts\":1669220526.929678,\"logger\":\"authorino\",\"msg\":\"setting instance base logger\",\"min level\":\"debug\",\"mode\":\"production\"}\n{\"level\":\"info\",\"ts\":1669220526.929718,\"logger\":\"authorino\",\"msg\":\"booting up authorino\",\"version\":\"7688cfa32317a49f0461414e741c980e9c05dba3\"}\n{\"level\":\"debug\",\"ts\":1669220526.9297278,\"logger\":\"authorino\",\"msg\":\"setting up with options\",\"auth-config-label-selector\":\"\",\"deep-metrics-enabled\":\"false\",\"enable-leader-election\":\"false\",\"evaluator-cache-size\":\"1\",\"ext-auth-grpc-port\":\"50051\",\"ext-auth-http-port\":\"5001\",\"health-probe-addr\":\":8081\",\"log-level\":\"debug\",\"log-mode\":\"production\",\"max-http-request-body-size\":\"8192\",\"metrics-addr\":\":8080\",\"oidc-http-port\":\"8083\",\"oidc-tls-cert\":\"/etc/ssl/certs/oidc.crt\",\"oidc-tls-cert-key\":\"/etc/ssl/private/oidc.key\",\"secret-label-selector\":\"authorino.kuadrant.io/managed-by=authorino\",\"timeout\":\"0\",\"tls-cert\":\"/etc/ssl/certs/tls.crt\",\"tls-cert-key\":\"/etc/ssl/private/tls.key\",\"watch-namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669220527.9816976,\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\":8080\"}\n{\"level\":\"info\",\"ts\":1669220527.9823213,\"logger\":\"authorino\",\"msg\":\"starting grpc auth service\",\"port\":50051,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9823658,\"logger\":\"authorino\",\"msg\":\"starting http auth service\",\"port\":5001,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9824295,\"logger\":\"authorino\",\"msg\":\"starting http oidc service\",\"port\":8083,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9825335,\"logger\":\"authorino\",\"msg\":\"starting manager\"}\n{\"level\":\"info\",\"ts\":1669220527.982721,\"logger\":\"authorino\",\"msg\":\"Starting server\",\"path\":\"/metrics\",\"kind\":\"metrics\",\"addr\":\"[::]:8080\"}\n{\"level\":\"info\",\"ts\":1669220527.982766,\"logger\":\"authorino\",\"msg\":\"Starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8081\"}\n{\"level\":\"info\",\"ts\":1669220527.9829438,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"source\":\"kind source: *v1.Secret\"}\n{\"level\":\"info\",\"ts\":1669220527.9829693,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting Controller\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669220527.9829714,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"source\":\"kind source: *v1beta1.AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220527.9830208,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting Controller\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220528.0834699,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting workers\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":1669220528.0836608,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting workers\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":1669220529.041266,\"logger\":\"authorino\",\"msg\":\"starting status update 
manager\"}\n{\"level\":\"info\",\"ts\":1669220529.0418258,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"source\":\"kind source: *v1beta1.AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220529.0418813,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting Controller\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220529.1432905,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting workers\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"worker count\":1}\n
Reconciling an AuthConfig and 2 related API key secrets {\"level\":\"debug\",\"ts\":1669221208.7473805,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsNotLinked\",\"message\":\"No hosts linked to the resource\"},{\"type\":\"Ready\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Unknown\"}],\"summary\":{\"ready\":false,\"hostsReady\":[],\"numHostsReady\":\"0/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7496614,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7532616,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535005,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535596,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7536132,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221208.753772,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.753835,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsLinked\"},{\"type\":\"Ready\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Reconciled\"}],\"summary\":{\"ready\":true,\"hostsReady\":[\"talker-api.127.0.0.1.nip.io\"],\"numHostsReady\":\"1/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7571108,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7573664,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.757429,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not 
change\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586699,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586884,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7586913,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n{\"level\":\"debug\",\"ts\":1669221208.7597604,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n
Enforcing an AuthConfig with authentication based on Kubernetes tokens: - identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830460.1486168,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1491194,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830460.150506,\"logger\":\"authorino.service.auth.authpipeline.identity.kubernetesauth\",\"msg\":\"calling kubernetes token review api\",\"request 
id\":\"8157480586935853928\",\"tokenreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"token\":\"eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"audiences\":[\"talker-api\"]},\"status\":{\"user\":{}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1509938,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830460.1517606,\"logger\":\"authorino.service.auth.authpipeline.identity.oauth2\",\"msg\":\"sending token introspection request\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"data\":\"token=eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA&token_type_hint=requesting_party_token\"}\n{\"level\":\"debug\",\"ts\":1634830460.1620777,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"}}\n{\"level\":\"debug\",\"ts\":1634830460.1622565,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.1670353,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"8157480586935853928\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.169326,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830460.1753876,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"}}\n{\"level\":\"debug\",\"ts\":1634830460.2331996,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830460.2495668,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830460.2927864,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830460.2930083,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"8157480586935853928\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830460.2955465,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"8157480586935853928\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"system:serviceaccount:authorino:api-consumer-1\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830460.2986183,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3044975,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3052874,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3NjAsImlhdCI6MTYzNDgzMDQ2MCwiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI4NDliMDk0ZDA4MzU0ZjM0MjA4ZGI3MjBmYWZmODlmNmM3NmYyOGY3MTcxOWI4NTQ3ZDk5NWNlNzAwMjU2ZGY4In0.Jn-VB5Q_0EX1ed1ji4KvhO4DlMqZeIl5H0qlukbTyYkp-Pgb4SnPGSbYWp5_uvG8xllsFAA5nuyBIXeba-dbkw\"}\n{\"level\":\"info\",\"ts\":1634830460.3054585,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830460.305476,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n
Enforcing an AuthConfig with authentication based on API keys - identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830413.2425854,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830413.2426975,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830413.2428744,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830413.2434332,\"logger\":\"authorino.service.auth.authpipeline\",\"msg\":\"skipping config\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"context canceled\"}\n{\"level\":\"debug\",\"ts\":1634830413.2479305,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"object\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"}}\n{\"level\":\"debug\",\"ts\":1634830413.248768,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"7199257136822741594\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.2496722,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830413.2497928,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"7199257136822741594\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.258932,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"}}\n{\"level\":\"debug\",\"ts\":1634830413.2945344,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830413.3123596,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830413.3340268,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830413.3367748,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"7199257136822741594\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY 
ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830413.339894,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3444238,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request 
id\":\"7199257136822741594\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"john\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830413.3547812,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3558292,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3MTMsImlhdCI6MTYzNDgzMDQxMywiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI5NjhiZjViZjk3MDM3NWRiNjE0ZDFhMDgzZTg2NTBhYTVhMGVhMzAyOTdiYmJjMTBlNWVlMWZmYTkxYTYwZmY4In0.7G440sWgi2TIaxrGJf5KWR9UOFpNTjwVYeaJXFLzsLhVNICoMLbYzBAEo4M3ym1jipxxTVeE7anm4qDDc7cnVQ\"}\n{\"level\":\"info\",\"ts\":1634830413.3569078,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830413.3569596,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n
Enforcing an AuthConfig with authentication based on API keys (invalid API key) - identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830373.2066543,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830373.2068064,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830373,\"nanos\":198329000},\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY invalid\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"9e391846-afe4-489a-8716-23a2e1c1aa77\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830373.2070816,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-opaque\",\"ExtendedProperties\":[],\"OAuth2\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"TokenIntrospectionUrl\":\"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"TokenTypeHint\":\"requesting_party_token\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.207225,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"the API Key provided is invalid\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072473,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not 
found\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072592,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"info\",\"ts\":1634830373.2073083,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\"}}\n{\"level\":\"debug\",\"ts\":1634830373.2073889,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\",\"headers\":[{\"Location\":\"https://my-app.io/login\"}]}}\n
Deleting an AuthConfig and 2 related API key secrets {\"level\":\"info\",\"ts\":1669221361.5032296,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221361.5057878,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n
Shutting down the service {\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136683,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136883,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0137057,\"logger\":\"authorino.controller.secret\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.013724,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.01375,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.013752,\"logger\":\"authorino.controller.secret\",\"msg\":\"All workers finished\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.0137632,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.013751,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137684,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137722,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.0138857,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0138955,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n{\"level\":\"info\",\"ts\":1669221635.0138893,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0139785,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n
"},{"location":"authorino/docs/user-guides/observability/#tracing","title":"Tracing","text":""},{"location":"authorino/docs/user-guides/observability/#request-id","title":"Request ID","text":"Processes related to the authorization request are identified and linked together by a request ID. The request ID can be:
- generated outside Authorino and passed in the authorization request \u2013 typically the case of requests sent via the gRPC authorization interface, initiated by Envoy;
- generated by Authorino \u2013 the case of requests sent via the Raw HTTP Authorization interface.
"},{"location":"authorino/docs/user-guides/observability/#propagation","title":"Propagation","text":"Authorino propagates trace identifiers compatible with the W3C Trace Context format https://www.w3.org/TR/trace-context/ and user-defined baggage data in the W3C Baggage format https://www.w3.org/TR/baggage.
"},{"location":"authorino/docs/user-guides/observability/#log-tracing","title":"Log tracing","text":"Most log messages associated with an authorization request include the request id
value. This value can be used to match an incoming request with its corresponding outgoing response log messages, including at a deeper level of detail when more fine-grained logging is enabled (debug
level).
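For example, to collect all log entries related to a single authorization request, filter the logs by its request id. A sketch, assuming Authorino runs in a deployment named authorino and reusing the request id from the sample logs above:
kubectl logs deployment/authorino | grep '\"request id\":\"8157480586935853928\"'\n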
"},{"location":"authorino/docs/user-guides/observability/#opentelemetry-integration","title":"OpenTelemetry integration","text":"Integration with an OpenTelemetry collector can be enabled by supplying the --tracing-service-endpoint
command-line flag (e.g. authorino server --tracing-service-endpoint=http://jaeger:14268/api/traces
).
The additional --tracing-service-tag
command-line flag (which can be repeated) allows specifying fixed agent-level key-value tags for the trace signals emitted by Authorino (e.g. authorino server --tracing-service-endpoint=... --tracing-service-tag=key1=value1 --tracing-service-tag=key2=value2
).
Traces related to authorization requests are additionally tagged with the authorino.request_id
attribute.
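When Authorino is deployed via the operator, the same settings can be declared in the Authorino custom resource instead of raw command-line flags. A minimal sketch, assuming the CRD exposes a tracing section and a Jaeger collector is reachable at the endpoint below:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\n tracing:\n endpoint: http://jaeger:14268/api/traces\n tags:\n key1: value1\nEOF\n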
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/","title":"User guide: OpenID Connect Discovery and authentication with JWTs","text":"Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
Authorino validates JSON Web Tokens (JWT) issued by an OpenID Connect server that implements OpenID Connect Discovery. Authorino fetches the OpenID Connect configuration and JSON Web Key Set (JWKS) from the issuer endpoint, and verifies the JSON Web Signature (JWS) and time validity of the token.
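You can inspect the same discovery metadata Authorino relies on by querying the issuer's well-known endpoint directly. A sketch, using the Keycloak server deployed later in this guide and jq (listed in the requirements below):
kubectl run oidc-discovery --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/.well-known/openid-configuration -s | jq .jwks_uri\n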
Important! Authorino does not implement OAuth2 grants or OIDC authentication flows. As a general good practice, obtaining and refreshing access tokens should be negotiated directly between the clients and the auth servers and token issuers. Authorino will only validate those tokens, using the parameters provided by the trusted issuer authorities.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
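For reference, a hedged sketch of what an equivalent Kuadrant AuthPolicy could look like for the example in this guide; the HTTPRoute name (talker-api) is hypothetical and the exact schema may vary across Kuadrant versions:
kubectl apply -f -<<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: talker-api-protection\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: talker-api\n rules:\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n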
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
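Before moving on, you can wait for the Authorino instance to come up. A sketch, assuming the operator names the deployment after the custom resource (authorino):
kubectl wait --for=condition=Available deployment/authorino --timeout=300s\n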
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because the iss
claim added by Keycloak to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
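To double-check the issuer before sending requests, you can decode the token's payload locally. A rough sketch using jq; the base64url padding is appended bluntly, so a possible harmless decode warning is suppressed:
echo \"$ACCESS_TOKEN\" | cut -d. -f2 | tr '_-' '/+' | sed 's/$/==/' | base64 -d 2>/dev/null | jq .iss\n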
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
With missing or invalid access token:
curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-kuadrant-realm\"\n# x-ext-auth-reason: credential not found\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oidc-rbac/","title":"User guide: OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak","text":"Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.
In this user guide, you will learn via example how to implement a simple Role-Based Access Control (RBAC) system to protect endpoints of an API, with roles assigned to users of an Identity Provider (Keycloak) and carried within the access tokens as JSON Web Token (JWT) claims. Users authenticate with the IdP via OAuth2/OIDC flow and get their access tokens verified and validated by Authorino on every request. Moreover, Authorino reads the role bindings of the user and enforces the proper RBAC rules based upon the context.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-rbac/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oidc-rbac/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, the Keycloak realm defines a few users and 2 realm roles: 'member' and 'admin'. When users authenticate to the Keycloak server by any of the supported OAuth2/OIDC flows, Keycloak adds to the access token JWT a claim \"realm_access\": { \"roles\": array }
that holds the list of roles assigned to the user. Authorino will verify the JWT on requests to the API and read from that claim to enforce the following RBAC rules:
Path | Method | Role
/resources[/*] | GET / POST / PUT | member
/resources/{id} | DELETE | admin
/admin[/*] | * | admin
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Apply the AuthConfig:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n\n patterns:\n \"member-role\":\n\n - selector: auth.identity.realm_access.roles\n operator: incl\n value: member\n \"admin-role\":\n - selector: auth.identity.realm_access.roles\n operator: incl\n value: admin\n\n authorization:\n # RBAC rule: 'member' role required for requests to /resources[/*]\n \"rbac-resources-api\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources(/.*)?$\n patternMatching:\n patterns:\n - patternRef: member-role\n\n # RBAC rule: 'admin' role required for DELETE requests to /resources/{id}\n \"rbac-delete-resource\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/\\d+$\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n patternMatching:\n patterns:\n - patternRef: admin-role\n\n # RBAC rule: 'admin' role required for requests to /admin[/*]\n \"rbac-admin-api\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/admin(/.*)?$\n patternMatching:\n patterns:\n - patternRef: admin-role\nEOF\n
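Before testing, you can confirm the AuthConfig is ready and linked to the host by reading its status summary:
kubectl get authconfig/talker-api-protection -o jsonpath='{.status.summary.ready}'\n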
"},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user John, who is assigned to the 'member' role:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
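Optionally, decode the token's payload to confirm the roles assigned to John in the realm_access claim. This is only a local sanity check: a rough sketch that assumes jq 1.6+ and naively base64-decodes the JWT payload (it may fail for payloads containing base64url-specific characters):
jq -R 'split(\".\") | .[1] | @base64d | fromjson | .realm_access' <<< \"$ACCESS_TOKEN\"\n# {\n#   \"roles\": [..., \"member\"]\n# }\n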
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As John, send a GET
request to /resources:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n
As John, send a DELETE
request to /resources/123:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 403 Forbidden\n
As John, send a GET
request to /admin/settings:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-jane-memberadmin","title":"Obtain an access token and consume the API as Jane (member/admin)","text":"Obtain an access token from within the cluster for the user Jane, who is assigned to the 'member' and 'admin' roles:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, send a GET
request to /resources:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n
As Jane, send a DELETE
request to /resources/123:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 200 OK\n
As Jane, send a GET
request to /admin/settings:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oidc-user-info/","title":"User guide: OpenID Connect UserInfo","text":"Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.
Authorino capabilities featured in this guide: - External auth metadata \u2192 OIDC UserInfo
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Apart from possibly complementing the information of the JWT, fetching OpenID Connect UserInfo at request time can be particularly useful for remotely checking the state of the session, as opposed to only verifying the JWT/JWS offline. The implementation requires an OpenID Connect issuer (spec.identity.oidc
) configured in the same AuthConfig
.
Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-user-info/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oidc-user-info/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 >/dev/null 2>&1 &\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"userinfo\":\n userInfo:\n identitySource: keycloak-kuadrant-realm\n authorization:\n \"active-tokens-only\":\n patternMatching:\n patterns:\n - selector: \"auth.metadata.userinfo.email\" # user email expected from the userinfo instead of the jwt\n operator: neq\n value: \"\"\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n
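To see the kind of data Authorino will fetch, you can also query Keycloak's UserInfo endpoint directly with the token. A sketch using Keycloak's standard OIDC endpoint path (the userinfo pod name is arbitrary):
kubectl run userinfo --attach --rm --restart=Never -q --image=curlimages/curl -- -s -H \"Authorization: Bearer $ACCESS_TOKEN\" http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/userinfo\n# {\"sub\":\"...\",\"email\":\"...\",...}\n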
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
"},{"location":"authorino/docs/user-guides/oidc-user-info/#consume-the-api","title":"\u277c Consume the API","text":"With a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/opa-authorization/","title":"User guide: Open Policy Agent (OPA) Rego policies","text":"Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.
Authorino capabilities featured in this guide: - Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Identity verification & authentication \u2192 API key
Authorino supports Open Policy Agent policies, either inline defined in Rego language as part of the AuthConfig
or fetched from an external endpoint, such as an OPA Policy Registry.
Authorino's built-in OPA module precompiles the policies at reconciliation time and caches them for fast evaluation at request time, when they receive the Authorization JSON as input.
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/opa-authorization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/opa-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 >/dev/null 2>&1 &\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, we will use OPA to implement a read-only policy for requests coming from outside a trusted network (IP range 192.168.1.0/24).
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.5
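If you have the opa CLI installed locally, you can experiment with the CIDR check used in the policy before applying it. A hypothetical local test, not required for this guide:
opa eval 'net.cidr_contains(\"192.168.1.1/24\", \"192.168.1.10\")' | jq '.result[0].expressions[0].value'\n# true\n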
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"read-only-outside\":\n opa:\n rego: |\n ips := split(input.context.request.http.headers[\"x-forwarded-for\"], \",\")\n trusted_network { net.cidr_contains(\"192.168.1.1/24\", ips[0]) }\n\n allow { trusted_network }\n allow { not trusted_network; input.context.request.http.method == \"GET\" }\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#create-the-api-key","title":"\u277b Create the API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#consume-the-api","title":"\u277c Consume the API","text":"Inside the trusted network:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Outside the trusted network:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
-
You can also set use_remote_address: true
in the Envoy route configuration, so the proxy appends its own IP address instead of running in transparent mode. This setting also ensures that the real remote address of the client connection is passed in the x-envoy-external-address
HTTP header, which can be used to simplify the read-only policy in remote environments.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/passing-credentials/","title":"User guide: Passing credentials (Authorization
header, cookie headers and others)","text":"Customize where credentials are supplied in the request by each trusted source of identity.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Auth credentials
- Identity verification & authentication \u2192 API key
Authentication tokens can be supplied in the Authorization
header, in a custom header, in a cookie, or in a query string parameter.
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/passing-credentials/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/passing-credentials/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 >/dev/null 2>&1 &\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, member
users can authenticate by supplying the API key in any of four different ways:
- HTTP header
Authorization: APIKEY <api-key>
- HTTP header
X-API-Key: <api-key>
- Query string parameter
api_key=<api-key>
- Cookie
Cookie: APIKEY=<api-key>;
admin
API keys are only accepted in the (default) HTTP header Authorization: Bearer <api-key>
.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"members-authorization-header\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n authorizationHeader:\n prefix: APIKEY # instead of the default prefix 'Bearer'\n \"members-custom-header\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n customHeader:\n name: X-API-Key\n \"members-query-string-param\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n queryString:\n name: api_key\n \"members-cookie\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n cookie:\n name: APIKEY\n \"admins\":\n apiKey:\n selector:\n matchLabels:\n group: admins\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#create-the-api-keys","title":"\u277b Create the API keys","text":"For a member user:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: members\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
For an admin user:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: admins\nstringData:\n api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#consume-the-api","title":"\u277c Consume the API","text":"As member user, passing the API key in the Authorization
header:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the custom X-API-Key
header:
curl -H 'X-API-Key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the query string parameter api_key
:
curl \"http://talker-api.127.0.0.1.nip.io:8000/hello?api_key=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\"\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the APIKEY
cookie header:
curl -H 'Cookie: APIKEY=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx;foo=bar' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As admin user:
curl -H 'Authorization: Bearer 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Missing the API key:
curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"members-authorization-header\"\n# www-authenticate: X-API-Key realm=\"members-custom-header\"\n# www-authenticate: api_key realm=\"members-query-string-param\"\n# www-authenticate: APIKEY realm=\"members-cookie\"\n# www-authenticate: Bearer realm=\"admins\"\n# x-ext-auth-reason: {\"admins\":\"credential not found\",\"members-authorization-header\":\"credential not found\",\"members-cookie\":\"credential not found\",\"members-custom-header\":\"credential not found\",\"members-query-string-param\":\"credential not found\"}\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/","title":"User guide: Resource-level authorization with User-Managed Access (UMA) resource registry","text":"Fetch resource metadata relevant for your authorization policies from Keycloak authorization clients, using User-Managed Access (UMA) protocol.
Authorino capabilities featured in this guide: - External auth metadata \u2192 User-Managed Access (UMA) resource registry
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Open Policy Agent (OPA) Rego policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 >/dev/null 2>&1 &\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
This example of resource-level authorization leverages part of Keycloak's User-Managed Access (UMA) support. Authorino will fetch resource attributes stored in a Keycloak resource server client.
The Keycloak server also provides the identities. The sub
claim of the Keycloak-issued ID tokens must match the owner of the requested resource, identified by the URI of the request.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Create a required secret that will be used by Authorino to initiate the authentication with the UMA registry.
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: talker-api-uma-credentials\nstringData:\n clientID: talker-api\n clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\nEOF\n
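Optionally, query Keycloak's UMA registry directly with these client credentials to inspect the registered resources. A rough sketch based on Keycloak's standard Protection API paths (the pat and resources pod names are arbitrary; the endpoint returns a list of resource IDs):
PAT=$(kubectl run pat --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=client_credentials' -d 'client_id=talker-api' -d 'client_secret=523b92b6-625d-4e1e-a313-77e7a8ae4e88' | jq -r .access_token)\nkubectl run resources --attach --rm --restart=Never -q --image=curlimages/curl -- -s -H \"Authorization: Bearer $PAT\" http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/resource_set\n# [\"<resource-id>\", ...]\n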
Create the config:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"resource-data\":\n uma:\n endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n credentialsRef:\n name: talker-api-uma-credentials\n authorization:\n \"owned-resources\":\n opa:\n rego: |\n COLLECTIONS = [\"greetings\"]\n\n http_request = input.context.request.http\n http_method = http_request.method\n requested_path_sections = split(trim_left(trim_right(http_request.path, \"/\"), \"/\"), \"/\")\n\n get { http_method == \"GET\" }\n post { http_method == \"POST\" }\n put { http_method == \"PUT\" }\n delete { http_method == \"DELETE\" }\n\n valid_collection { COLLECTIONS[_] == requested_path_sections[0] }\n\n collection_endpoint {\n valid_collection\n count(requested_path_sections) == 1\n }\n\n resource_endpoint {\n valid_collection\n some resource_id\n requested_path_sections[1] = resource_id\n }\n\n identity_owns_the_resource {\n identity := input.auth.identity\n resource_attrs := object.get(input.auth.metadata, \"resource-data\", [])[0]\n resource_owner := object.get(object.get(resource_attrs, \"owner\", {}), \"id\", \"\")\n resource_owner == identity.sub\n }\n\n allow { get; collection_endpoint }\n allow { post; collection_endpoint }\n allow { get; resource_endpoint; identity_owns_the_resource }\n allow { put; resource_endpoint; identity_owns_the_resource }\n allow { delete; resource_endpoint; identity_owns_the_resource }\nEOF\n
The OPA policy owned-resources
above enforces that all users can send GET and POST requests to /greetings
, while only resource owners can send GET, PUT and DELETE requests to /greetings/{resource-id}
.
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-access-tokens-with-the-keycloak-server-and-consume-the-api","title":"\u277b Obtain access tokens with the Keycloak server and consume the API","text":""},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-john-and-consume-the-api","title":"Obtain an access token as John and consume the API","text":"Obtain an access token for user John (owner of the resource /greetings/1
in the UMA registry):
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As John, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-jane-and-consume-the-api","title":"Obtain an access token as Jane and consume the API","text":"Obtain an access token for user Jane (owner of the resource /greetings/2
in the UMA registry):
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-peter-and-consume-the-api","title":"Obtain an access token as Peter and consume the API","text":"Obtain an access token for user Peter (does not own any resource in the UMA registry):
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Peter, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete secret/talker-api-uma-credentials\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/sharding/","title":"User guide: Reducing the operational space","text":"By default, Authorino will watch events related to all AuthConfig
custom resources in the reconciliation space (namespace or entire cluster). Instances can, however, be configured to watch only a subset of the resources, enabling use cases such as:
- reducing noise and memory usage inside instances meant for a restricted scope (e.g. Authorino deployed as a dedicated sidecar to protect only one host);
- sharding auth config data across multiple instances;
- running multiple environments (e.g. staging, production) within the same cluster/namespace;
- providing managed instances of Authorino that all watch CRs cluster-wide, yet dedicated to organizations allowed to create and operate their own
AuthConfig
s across multiple namespaces.
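Scoping is done with standard Kubernetes label selectors (as shown in the deployment step below), so you can preview which resources a given instance would watch with a plain kubectl query. For example, with the production selector used later in this guide:
kubectl get authconfigs -A -l 'authorino/environment=production,!disabled'\n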
\u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. Authorino capabilities featured in this guide: - Sharding
- Identity verification & authentication \u2192 API key
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/sharding/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
"},{"location":"authorino/docs/user-guides/sharding/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/sharding/#deploy-instances-of-authorino","title":"\u2777 Deploy instances of Authorino","text":"Deploy an instance of Authorino dedicated to AuthConfig
s and API key Secrets
labeled with authorino/environment=staging
:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino-staging\nspec:\n clusterWide: true\n authConfigLabelSelectors: authorino/environment=staging\n secretLabelSelectors: authorino/environment=staging\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
Deploy an instance of Authorino dedicated to AuthConfig
s and API key Secrets
labeled with authorino/environment=production
, and NOT labeled disabled
:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino-production\nspec:\n clusterWide: true\n authConfigLabelSelectors: authorino/environment=production,!disabled\n secretLabelSelectors: authorino/environment=production,!disabled\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
The commands above will both request instances of Authorino that watch for AuthConfig
resources cluster-wide1, with TLS disabled2.
"},{"location":"authorino/docs/user-guides/sharding/#create-a-namespace-for-user-resources","title":"\u2778 Create a namespace for user resources","text":"kubectl create namespace myapp\n
"},{"location":"authorino/docs/user-guides/sharding/#create-authconfigs-and-api-key-secrets-for-both-instances","title":"\u2779 Create AuthConfig
s and API key Secret
s for both instances","text":""},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-staging","title":"Create resources for authorino-staging
","text":"Create an AuthConfig
:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: auth-config-1\n labels:\n authorino/environment: staging\nspec:\n hosts:\n\n - my-host.staging.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels:\n authorino/api-key: \"true\"\n authorino/environment: staging\nEOF\n
Create an API key Secret
:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino/api-key: \"true\"\n authorino/environment: staging\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
Verify in the logs that only the authorino-staging
instance adds the resources to the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-staging -o name)\n# {\"level\":\"info\",\"ts\":1638382989.8327162,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638382989.837424,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638383144.9486837,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-1\"}\n
"},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-production","title":"Create resources for authorino-production
","text":"Create an AuthConfig
:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: auth-config-2\n labels:\n authorino/environment: production\nspec:\n hosts:\n\n - my-host.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels:\n authorino/api-key: \"true\"\n authorino/environment: production\nEOF\n
Create an API key Secret
:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino/api-key: \"true\"\n authorino/environment: production\nstringData:\n api_key: MUWdeBte7AbSWxl6CcvYNJ+3yEIm5CaL\ntype: Opaque\nEOF\n
Verify in the logs that only the authorino-production
instance adds the resources to the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383423.86086,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383423.8608105,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383460.3515081,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-2\"}\n
"},{"location":"authorino/docs/user-guides/sharding/#remove-a-resource-from-scope","title":"\u277a Remove a resource from scope","text":"kubectl -n myapp label authconfig/auth-config-2 disabled=true\n# authconfig.authorino.kuadrant.io/auth-config-2 labeled\n
Verify in the logs that the authorino-production
instance removes the authconfig from the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383515.6428752,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource de-indexed\",\"authconfig\":\"myapp/auth-config-2\"}\n
"},{"location":"authorino/docs/user-guides/sharding/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authorino/authorino-staging\nkubectl delete authorino/authorino-production\nkubectl delete namespace myapp\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
cluster-wide
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/token-normalization/","title":"User guide: Token normalization","text":"Broadly, the term token normalization in authentication systems usually implies the exchange of an authentication token, as provided by the user in a given format, and/or its associated identity claims, for another freshly issued token/set of claims, of a given (normalized) structure or format.
The most typical use case for token normalization involves accepting tokens issued by multiple trusted sources, often based on varied authentication protocols, while ensuring that the possibly different data structures adopted by each of those sources are normalized, thus simplifying the policies and authorization checks that depend on those values. In general, however, any modification to the identity claims can serve the purpose of normalization.
This user guide focuses on mutating the identity claims resolved from an authentication token, whether into a certain data format or by extending them, so that required attributes can thereafter be trusted to be present among the claims, in the desired form. To that end, Authorino allows resolved identity objects to be extended with custom attributes (custom claims) holding either static values or values fetched from the Authorization JSON.
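For illustration, identity extension in the v1beta2 API is declared with defaults and/or overrides blocks under the authentication method. A minimal sketch, assuming API key identities whose Secrets carry a username annotation (the names here are hypothetical; this guide's own AuthConfig comes later):
authentication:\n  \"api-key-users\":\n    apiKey:\n      selector:\n        matchLabels:\n          group: users\n    defaults:\n      \"username\":\n        selector: auth.identity.metadata.annotations.username\n    overrides:\n      \"token_source\":\n        value: api-key\n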
Beyond normalizing the identity claims to simplify authorization checks and policies, you can also have Authorino issue a new token in a normalized format: check out the Festival Wristband tokens feature.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Identity extension
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Check out as well the user guides about Authentication with API keys, OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/token-normalization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/token-normalization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/token-normalization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/token-normalization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/token-normalization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
This example implements a policy that only users bound to the admin
role can send DELETE
requests.
The config trusts access tokens issued by a Keycloak realm, as well as API keys specifically labeled as belonging to a selected group (friends
). The roles of the identities handled by Keycloak are managed in Keycloak, as realm roles. Particularly, users john
and peter
are bound to the member
role, while user jane
is bound to roles member
and admin
. As for the users authenticating with API key, they are all bound to the admin
role.
Without normalizing the identity claims from these two different sources, the policy would have to handle the different data formats with additional if-else logic. Instead, the config here extends the resolved identity objects (see the overrides and defaults properties in the AuthConfig below) to ensure a custom roles
(Array) claim is always present in the identity object. In the case of Keycloak ID tokens, the value is extracted from the realm_access.roles
claim; for API key-resolved objects, the custom claim is set to the static value [\"admin\"]
.
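Illustratively, once normalized, both identity sources expose the same attribute, so a single check over auth.identity.roles works for either path. A sketch based on the role bindings described above (not verbatim Authorino output):
# identity resolved from a Keycloak token (user jane): auth.identity.roles = [\"member\", \"admin\"]\n# identity resolved from an API key: auth.identity.roles = [\"admin\"]\n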
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n overrides:\n \"roles\":\n selector: auth.identity.realm_access.roles\n \"api-key-friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n defaults:\n \"roles\":\n value: [\"admin\"]\n authorization:\n \"only-admins-can-delete\":\n when:\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n patternMatching:\n patterns:\n - selector: auth.identity.roles\n operator: incl\n value: admin\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is reachable from within the cluster as well.
Consume the API as Jane:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
Consume the API as John:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api-using-the-api-key-to-authenticate-admin","title":"Consume the API using the API key to authenticate (admin)","text":"curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/token-normalization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/validating-webhook/","title":"User guide: Using Authorino as ValidatingWebhook service","text":"Authorino provides an interface for raw HTTP external authorization requests. This interface can be used for integrations other than the typical Envoy gRPC protocol, such as (though not limited to) using Authorino as a generic Kubernetes ValidatingWebhook service.
The rules to validate a request to the Kubernetes API \u2013 typically a POST
, PUT
or DELETE
request targeting a particular Kubernetes resource or collection \u2013 according to which the change is either accepted or rejected, are written in an Authorino AuthConfig
custom resource. Authentication and authorization are performed by the Kubernetes API server as usual, with Authorino's auth features implementing the additional validation within the scope of an AdmissionReview
request.
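For reference, the body of such an AdmissionReview request posted by the Kubernetes API server looks roughly like the sketch below (abridged; only the fields relevant to this guide are shown, and the object field carries the resource being created or updated):
{\n \"apiVersion\": \"admission.k8s.io/v1\",\n \"kind\": \"AdmissionReview\",\n \"request\": {\n \"operation\": \"CREATE\",\n \"userInfo\": {\"username\": \"kubernetes-admin\", \"groups\": [\"system:masters\"]},\n \"object\": {\"apiVersion\": \"authorino.kuadrant.io/v1beta2\", \"kind\": \"AuthConfig\", \"...\": \"...\"}\n }\n}\n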
This user guide provides an example of using Authorino as a Kubernetes ValidatingWebhook service that validates requests to CREATE
and UPDATE
Authorino AuthConfig
resources. In other words, we will use Authorino as a validator inside the cluster that decides what counts as a valid AuthConfig for any application that wants to rely on Authorino to protect itself.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Plain
- Identity verification & authentication \u2192 Kubernetes TokenReview
- Identity verification & authentication \u2192 API key
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Kubernetes SubjectAccessReview
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Dynamic response \u2192 Festival Wristband tokens
- Common feature \u2192 Conditions
- Common feature \u2192 Priorities
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/validating-webhook/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
The Keycloak server is only needed for trying out the validation of AuthConfig resources that use the authentication server.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant you may already have Authorino installed and running. In this case, skip straight to step \u2778.
At step \u277a, alternatively to creating an AuthConfig
custom resource, you may create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/validating-webhook/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"Create the namespace:
kubectl create namespace authorino\n
Create the TLS certificates:
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n
Create the Authorino instance:
The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources cluster-wide2, with TLS enabled3.
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n listener:\n ports:\n grpc: 50051\n http: 5001 # for admissionreview requests sent by the kubernetes api server\n tls:\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
For convenience, the same Authorino instance pointed to as the validating webhook will also be targeted by the sample AuthConfigs created to test the validation. To use different Authorino instances for the validating webhook and for protecting applications behind a proxy, check out the section about sharding in the docs. There is also a user guide on the topic, with concrete examples.
"},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-authconfig-and-related-clusterrole","title":"\u2778 Create the AuthConfig
and related ClusterRole
","text":"Create the AuthConfig
with the auth rules to validate other AuthConfig resources applied to the cluster.
The AuthConfig to validate other AuthConfigs will enforce the following rules:
- Authorino features that cannot be used by any application in their security schemes:
- Anonymous Access
- Plain identity object extracted from context
- Kubernetes authentication (TokenReview)
- Kubernetes authorization (SubjectAccessReview)
- Festival Wristband tokens
- Authorino features that require a RoleBinding to a specific ClusterRole in the 'authorino' namespace, to be used in an AuthConfig:
- Authorino API key authentication
- All metadata pulled from external sources must be cached for precisely 5 minutes (300 seconds)
kubectl -n authorino apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: authconfig-validator\nspec:\n # admissionreview requests will be sent to this host name\n hosts:\n\n - authorino-authorino-authorization.authorino.svc\n\n # because we're using a single authorino instance for the validating webhook and to protect the user applications,\n # skip operations related to this one authconfig in the 'authorino' namespace\n when:\n\n - selector: context.request.http.body.@fromstr|request.object.metadata.namespace\n operator: neq\n value: authorino\n\n # kubernetes admissionreviews carry info about the authenticated user\n authentication:\n \"k8s-userinfo\":\n plain:\n selector: context.request.http.body.@fromstr|request.userInfo\n\n authorization:\n \"features\":\n opa:\n rego: |\n authconfig = json.unmarshal(input.context.request.http.body).request.object\n\n forbidden { count(object.get(authconfig.spec, \"authentication\", [])) == 0 }\n forbidden { authconfig.spec.authentication[_].anonymous }\n forbidden { authconfig.spec.authentication[_].kubernetesTokenReview }\n forbidden { authconfig.spec.authentication[_].plain }\n forbidden { authconfig.spec.authorization[_].kubernetesSubjectAccessReview }\n forbidden { authconfig.spec.response.success.headers[_].wristband }\n\n apiKey { authconfig.spec.authentication[_].apiKey }\n\n allow { count(authconfig.spec.authentication) > 0; not forbidden }\n allValues: true\n\n \"apikey-authn-requires-k8s-role-binding\":\n priority: 1\n when:\n\n - selector: auth.authorization.features.apiKey\n operator: eq\n value: \"true\"\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.username\n resourceAttributes:\n namespace: { value: authorino }\n group: { value: authorino.kuadrant.io }\n resource: { value: authconfigs-with-apikeys }\n verb: { value: create }\n\n \"metadata-cache-ttl\":\n priority: 1\n opa:\n rego: |\n invalid_ttl = input.auth.authorization.features.authconfig.spec.metadata[_].cache.ttl != 300\n allow { not invalid_ttl }\nEOF\n
Define a ClusterRole
to control the usage of protected features of Authorino:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: authorino-apikey\nrules:\n\n- apiGroups: [\"authorino.kuadrant.io\"]\n resources: [\"authconfigs-with-apikeys\"] # not a real k8s resource\n verbs: [\"create\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-validatingwebhookconfiguration","title":"\u2779 Create the ValidatingWebhookConfiguration
","text":"kubectl -n authorino apply -f -<<EOF\napiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: authconfig-authz\n annotations:\n cert-manager.io/inject-ca-from: authorino/authorino-ca-cert\nwebhooks:\n\n- name: check-authconfig.authorino.kuadrant.io\n clientConfig:\n service:\n namespace: authorino\n name: authorino-authorino-authorization\n port: 5001\n path: /check\n rules:\n - apiGroups: [\"authorino.kuadrant.io\"]\n apiVersions: [\"v1beta2\"]\n resources: [\"authconfigs\"]\n operations: [\"CREATE\", \"UPDATE\"]\n scope: Namespaced\n sideEffects: None\n admissionReviewVersions: [\"v1\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#try-it-out","title":"\u277a Try it out","text":"Create a namespace:
kubectl create namespace myapp\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-a-valid-authconfig","title":"With a valid AuthConfig
","text":"Kuadrant users \u2013 For this and other example AuthConfigs below, if you create a Kuadrant AuthPolicy
instead, the output of the commands shall differ. The requested AuthPolicy may be initially accepted, but its state will turn ready or not ready depending on whether the corresponding AuthConfig requested by Kuadrant is accepted or rejected, according to the validating webhook rules. Check the state of the resources to confirm. For more, see Kuadrant auth. kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection created\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-forbidden-features","title":"With forbidden features","text":"Anonymous access:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":null}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"anonymous-access\":\n anonymous: {}\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"anonymous-access\\\":{\\\"anonymous\\\":{}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"anonymous-access\":{\"anonymous\":{}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Kubernetes TokenReview:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"k8s-tokenreview\":\n kubernetesTokenReview:\n audiences: [\"myapp\"]\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"k8s-tokenreview\\\":{\\\"kubernetesTokenReview\\\":{\\\"audiences\\\":[\\\"myapp\\\"]}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"k8s-tokenreview\":{\"kubernetesTokenReview\":{\"audiences\":[\"myapp\"]}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Plain identity extracted from context:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"envoy-jwt-authn\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"envoy-jwt-authn\\\":{\\\"plain\\\":{\\\"selector\\\":\\\"context.metadata_context.filter_metadata.envoy\\\\\\\\.filters\\\\\\\\.http\\\\\\\\.jwt_authn|verified_jwt\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"envoy-jwt-authn\":{\"plain\":{\"selector\":\"context.metadata_context.filter_metadata.envoy\\\\.filters\\\\.http\\\\.jwt_authn|verified_jwt\"}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Kubernetes SubjectAccessReview:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"k8s-subjectaccessreview\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.sub\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"authorization\\\":{\\\"k8s-subjectaccessreview\\\":{\\\"kubernetesSubjectAccessReview\\\":{\\\"user\\\":{\\\"selector\\\":\\\"auth.identity.sub\\\"}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authorization\":{\"k8s-subjectaccessreview\":{\"kubernetesSubjectAccessReview\":{\"user\":{\"selector\":\"auth.identity.sub\"}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Festival Wristband tokens:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: wristband-signing-key\nstringData:\n key.pem: |\n -----BEGIN EC PRIVATE KEY-----\n MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n -----END EC PRIVATE KEY-----\ntype: Opaque\n---\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n response:\n success:\n headers:\n \"wristband\":\n wristband:\n issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\n signingKeyRefs:\n - algorithm: ES256\n name: wristband-signing-key\nEOF\n# secret/wristband-signing-key created\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"response\\\":{\\\"success\\\":{\\\"headers\\\":{\\\"wristband\\\":{\\\"wristband\\\":{\\\"issuer\\\":\\\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\\\",\\\"signingKeyRefs\\\":[{\\\"algorithm\\\":\\\"ES256\\\",\\\"name\\\":\\\"wristband-signing-key\\\"}]}}}}}}}\\n\"}},\"spec\":{\"response\":{\"success\":{\"headers\":{\"wristband\":{\"wristband\":{\"issuer\":\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\",\"signingKeyRefs\":[{\"algorithm\":\"ES256\",\"name\":\"wristband-signing-key\"}]}}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-additional-permissions","title":"With features that require additional permissions","text":"Before adding the required permissions:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels: { app: myapp }\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"api-key\\\":{\\\"apiKey\\\":{\\\"selector\\\":{\\\"matchLabels\\\":{\\\"app\\\":\\\"myapp\\\"}}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":{\"apiKey\":{\"selector\":{\"matchLabels\":{\"app\":\"myapp\"}}}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Not authorized: unknown reason\n
Add the required permissions:
kubectl -n authorino apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: authorino-apikey\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: authorino-apikey\nsubjects:\n\n- kind: User\n name: kubernetes-admin\nEOF\n# rolebinding.rbac.authorization.k8s.io/authorino-apikey created\n
After adding the required permissions:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels: { app: myapp }\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-specific-property-validation","title":"With features that require specific property validation","text":"Invalid:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"external-source\":\n http:\n url: http://metadata.io\n cache:\n key: { value: global }\n ttl: 60\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"metadata\\\":{\\\"external-source\\\":{\\\"cache\\\":{\\\"key\\\":{\\\"value\\\":\\\"global\\\"},\\\"ttl\\\":60},\\\"http\\\":{\\\"url\\\":\\\"http://metadata.io\\\"}}}}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":null,\"keycloak\":{\"jwt\":{\"issuerUrl\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\"}}},\"metadata\":{\"external-source\":{\"cache\":{\"key\":{\"value\":\"global\"},\"ttl\":60},\"http\":{\"url\":\"http://metadata.io\"}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Valid:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"external-source\":\n http:\n url: http://metadata.io\n cache:\n key: { value: global }\n ttl: 300\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete namespace myapp\nkubectl delete namespace authorino\nkubectl delete clusterrole authorino-apikey\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
cluster-wide
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
"},{"location":"authorino-operator/","title":"Authorino Operator","text":"A Kubernetes Operator to manage Authorino instances.
"},{"location":"authorino-operator/#installation","title":"Installation","text":"The Operator can be installed by applying the manifests to the Kubernetes cluster or using Operator Lifecycle Manager (OLM)
"},{"location":"authorino-operator/#applying-the-manifests-to-the-cluster","title":"Applying the manifests to the cluster","text":" - Create the namespace for the Operator
kubectl create namespace authorino-operator\n
- Install the Operator manifests
make install\n
- Deploy the Operator
make deploy\n
Tip: Deploy a custom image of the Operator To deploy an image of the Operator other than the default quay.io/kuadrant/authorino-operator:latest
, specify it by setting the OPERATOR_IMAGE
parameter. E.g.: make deploy OPERATOR_IMAGE=authorino-operator:local\n
"},{"location":"authorino-operator/#installing-via-olm","title":"Installing via OLM","text":"To install the Operator using the Operator Lifecycle Manager, you need to make the Operator CSVs available in the cluster by creating a CatalogSource
resource.
The bundle and catalog images of the Operator are available in Quay.io:
Bundle quay.io/kuadrant/authorino-operator-bundle Catalog quay.io/kuadrant/authorino-operator-catalog - Create the namespace for the Operator
kubectl create namespace authorino-operator\n
- Create the CatalogSource resource pointing to one of the images in the Operator's catalog repo:
kubectl -n authorino-operator apply -f -<<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n name: operatorhubio-catalog\n namespace: authorino-operator\nspec:\n sourceType: grpc\n image: quay.io/kuadrant/authorino-operator-catalog:latest\n displayName: Authorino Operator\nEOF\n
"},{"location":"authorino-operator/#requesting-an-authorino-instance","title":"Requesting an Authorino instance","text":"Once the Operator is up and running, you can request instances of Authorino by creating Authorino
CRs. E.g.:
kubectl -n default apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino-operator/#the-authorino-custom-resource-definition-crd","title":"The Authorino
Custom Resource Definition (CRD)","text":"API to install, manage and configure Authorino authorization services .
Each Authorino
Custom Resource (CR) represents an instance of Authorino deployed to the cluster. The Authorino Operator will reconcile the state of the Kubernetes Deployment and associated resources, based on the state of the CR.
"},{"location":"authorino-operator/#api-specification","title":"API Specification","text":"Field Type Description Required/Default spec AuthorinoSpec Specification of the Authorino deployment. Required"},{"location":"authorino-operator/#authorinospec","title":"AuthorinoSpec","text":"Field Type Description Required/Default clusterWide Boolean Sets the Authorino instance's watching scope \u2013 cluster-wide or namespaced. Default: true
(cluster-wide) authConfigLabelSelectors String Label selectors used by the Authorino instance to filter AuthConfig
-related reconciliation events. Default: empty (all AuthConfigs are watched) secretLabelSelectors String Label selectors used by the Authorino instance to filter Secret
-related reconciliation events (API key and mTLS authentication methods). Default: authorino.kuadrant.io/managed-by=authorino
supersedingHostSubsets Boolean Enable/disable allowing AuthConfigs to supersede strict subsets of hosts already taken. Default: false
replicas Integer Number of replicas desired for the Authorino instance. Values greater than 1 enable leader election in the Authorino service (where the leader updates the statuses of the AuthConfig
CRs). Default: 1 evaluatorCacheSize Integer Cache size (in megabytes) of each Authorino evaluator (when enabled in an AuthConfig
). Default: 1 image String Authorino image to be deployed (for dev/testing purpose only). Default: quay.io/kuadrant/authorino:latest
imagePullPolicy String Sets the imagePullPolicy of the Authorino Deployment (for dev/testing purpose only). Default: k8s default logLevel String Defines the level of log you want to enable in Authorino (debug
, info
and error
). Default: info
logMode String Defines the log mode in Authorino (development
or production
). Default: production
listener Listener Specification of the authorization service (gRPC interface). Required oidcServer OIDCServer Specification of the OIDC service. Required tracing Tracing Configuration of the OpenTelemetry tracing exporter. Optional metrics Metrics Configuration of the metrics server (port, level). Optional healthz Healthz Configuration of the health/readiness probe (port). Optional volumes VolumesSpec Additional volumes to be mounted in the Authorino pods. Optional"},{"location":"authorino-operator/#listener","title":"Listener","text":"Configuration of the authorization server \u2013 gRPC and raw HTTP interfaces
Field Type Description Required/Default port Integer Port number of the authorization server (gRPC interface). DEPRECATED. Use ports
instead. ports Ports Port numbers of the authorization server (gRPC and raw HTTP interfaces). Optional tls TLS TLS configuration of the authorization server (gRPC and HTTP interfaces). Required timeout Integer Timeout of the external authorization request (in milliseconds), controlled internally by the authorization server. Default: 0
(disabled)"},{"location":"authorino-operator/#oidcserver","title":"OIDCServer","text":"Configuration of the OIDC Discovery server for Festival Wristband tokens.
Field Type Description Required/Default port Integer Port number of OIDC Discovery server for Festival Wristband tokens. Default: 8083
tls TLS TLS configuration of the OIDC Discovery server for Festival Wristband tokens. Required"},{"location":"authorino-operator/#tls","title":"TLS","text":"TLS configuration of the server. Appears in listener
and oidcServer
.
Field Type Description Required/Default enabled Boolean Whether TLS is enabled or disabled for the server. Default: true
certSecretRef LocalObjectReference The reference to the secret that contains the TLS certificates tls.crt
and tls.key
. Required when enabled: true
"},{"location":"authorino-operator/#ports","title":"Ports","text":"Port numbers of the authorization server.
Field Type Description Required/Default grpc Integer Port number of the gRPC interface of the authorization server. Set to 0 to disable this interface. Default: 50001
http Integer Port number of the raw HTTP interface of the authorization server. Set to 0 to disable this interface. Default: 5001
"},{"location":"authorino-operator/#tracing","title":"Tracing","text":"Configuration of the OpenTelemetry tracing exporter.
Field Type Description Required/Default endpoint String Full endpoint of the OpenTelemetry tracing collector service (e.g. http://jaeger:14268/api/traces). Required tags Map Key-value map of fixed tags to add to all OpenTelemetry traces emitted by Authorino. Optional insecure Boolean Enable/disable insecure connection to the tracing endpoint. Default: false
"},{"location":"authorino-operator/#metrics","title":"Metrics","text":"Configuration of the metrics server.
Field Type Description Required/Default port Integer Port number of the metrics server. Default: 8080
deep Boolean Enable/disable metrics at the level of each evaluator config (if requested in the AuthConfig
) exported by the metrics server. Default: false
"},{"location":"authorino-operator/#healthz","title":"Healthz","text":"Configuration of the health/readiness probe (port).
Field Type Description Required/Default port Integer Port number of the health/readiness probe. Default: 8081
"},{"location":"authorino-operator/#volumesspec","title":"VolumesSpec","text":"Additional volumes to project in the Authorino pods. Useful for validation of TLS self-signed certificates of external services known to have to be contacted by Authorino at runtime.
Field Type Description Required/Default items []VolumeSpec List of additional volume items to project. Optional defaultMode Integer Mode bits used to set permissions on the files. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. Optional"},{"location":"authorino-operator/#volumespec","title":"VolumeSpec","text":"Field Type Description Required/Default name String Name of the volume and volume mount within the Deployment. It must be unique in the CR. Optional mountPath String Absolute path where all the items are mounted. Required configMaps []String List of Kubernetes ConfigMap names to mount. Required exactly one of: configMaps
, secrets
. secrets []String List of Kubernetes Secret names to mount. Required exactly one of: configMaps
, secrets
. items []KeyToPath Mount details for selecting specific ConfigMap or Secret entries. Optional"},{"location":"authorino-operator/#full-example","title":"Full example","text":"apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n authConfigLabelSelectors: environment=production\n secretLabelSelectors: authorino.kuadrant.io/component=authorino,environment=production\n\n replicas: 2\n\n evaluatorCacheSize: 2 # mb\n\n image: quay.io/kuadrant/authorino:latest\n imagePullPolicy: Always\n\n logLevel: debug\n logMode: production\n\n listener:\n ports:\n grpc: 50001\n http: 5001\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n oidcServer:\n port: 8083\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n metrics:\n port: 8080\n deep: true\n\n volumes:\n items:\n\n - name: keycloak-tls-cert\n mountPath: /etc/ssl/certs\n configMaps:\n - keycloak-tls-cert\n items: # details to mount the k8s configmap in the authorino pods\n - key: keycloak.crt\n path: keycloak.crt\n defaultMode: 420\n
"},{"location":"limitador/","title":"Limitador","text":"Limitador is a generic rate-limiter written in Rust. It can be used as a library, or as a service. The service exposes HTTP endpoints to apply and observe limits. Limitador can be used with Envoy because it also exposes a grpc service, on a different port, that implements the Envoy Rate Limit protocol (v3).
- Getting started
- How it works
- Configuration
- Development
- Testing Environment
- Kubernetes
- Contributing
- License
Limitador is under active development, and its API has not been stabilized yet.
"},{"location":"limitador/#getting-started","title":"Getting started","text":" - Rust library
- Server
"},{"location":"limitador/#rust-library","title":"Rust library","text":"Add this to your Cargo.toml
:
[dependencies]\nlimitador = { version = \"0.3.0\" }\n
For more information, see the README
of the crate.
"},{"location":"limitador/#server","title":"Server","text":"Run with Docker (replace latest
with the version you want):
docker run --rm --net=host -it quay.io/kuadrant/limitador:v1.0.0\n
Run locally:
cargo run --release --bin limitador-server -- --help\n
Refer to the help message on how to start up the server. More information is available in the server's README.md
"},{"location":"limitador/#development","title":"Development","text":""},{"location":"limitador/#build","title":"Build","text":"cargo build\n
"},{"location":"limitador/#run-the-tests","title":"Run the tests","text":"Some tests need a redis deployed in localhost:6379
. You can run it in Docker with:
docker run --rm -p 6379:6379 -it redis\n
Then, run the tests:
cargo test --all-features\n
or you can run tests disabling the \"redis storage\" feature:
cd limitador; cargo test --no-default-features\n
"},{"location":"limitador/#contributing","title":"Contributing","text":"Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"limitador/#license","title":"License","text":"Apache 2.0 License
"},{"location":"limitador/doc/how-it-works/","title":"How it works","text":""},{"location":"limitador/doc/how-it-works/#how-it-works","title":"How it works","text":"Limitador ensures that the most restrictive limit configuration will apply.
Limitador will try to match each incoming descriptor against the conditions and variables of the counters in the same namespace. The namespace for the descriptors is defined by the domain
field whereas for the rate limit configuration the namespace
field is being used. For each matching counter, the counter is increased and the limits checked.
One example to illustrate:
Let's say we have 1 rate limit configuration (one counter per config):
conditions: [\"KEY_A == 'VALUE_A'\"]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Limitador receives one descriptor with two entries:
domain: example.org\ndescriptors:\n\n - entries:\n - KEY_A: VALUE_A\n - OTHER_KEY: OTHER_VALUE\n
The counter's condition will match. Then, the counter will be increased and the limit checked. If the limit is exceeded, the request will be rejected with 429 Too Many Requests
, otherwise accepted.
Note that the counter is being activated even though it does not match all the entries of the descriptor. The same rule applies for the variables field.
Currently, the implementation of condition only allows for equal (==
) and not equal (!=
) operators. More operators will be implemented based on the use cases for them.
The variables field is a list of keys. The matching rule simply requires that descriptor entries exist with those same keys. If variables is variables: [A, B, C]
, a descriptor matches if it has at least three entries with the same A, B, C keys.
A few examples to illustrate.
Having the following descriptors:
domain: example.org\ndescriptors:\n\n - entries:\n - KEY_A: VALUE_A\n - OTHER_KEY: OTHER_VALUE\n
the following counters would not be activated.
conditions: [\"KEY_B == 'VALUE_B'\"]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Reason: the condition's key does not exist in the descriptor conditions:\n\n - \"KEY_A == 'VALUE_A'\"\n - \"OTHER_KEY == 'WRONG_VALUE'\"\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Reason: not all the conditions match conditions: []\nmax_value: 1\nseconds: 60\nvariables: [\"MY_VAR\"]\nnamespace: example.org\n
Reason: the variable name does not exist conditions: [\"KEY_B == 'VALUE_B'\"]\nmax_value: 1\nseconds: 60\nvariables: [\"MY_VAR\"]\nnamespace: example.org\n
Reason: Both variables and conditions must match. In this particular case, only conditions match"},{"location":"limitador/doc/topologies/","title":"Deployment topologies","text":""},{"location":"limitador/doc/topologies/#in-memory","title":"In-memory","text":""},{"location":"limitador/doc/topologies/#redis","title":"Redis","text":""},{"location":"limitador/doc/topologies/#redis-active-active-storage","title":"Redis active-active storage","text":"The RedisLabs version of Redis supports active-active replication. Limitador is compatible with that deployment mode, but there are a few things to take into account regarding limit accuracy.
"},{"location":"limitador/doc/topologies/#considerations","title":"Considerations","text":"With an active-active deployment, the data needs to be replicated between instances. An update in an instance takes a short time to be reflected in the other. That time lag depends mainly on the network speed between the Redis instances, and it affects the accuracy of the rate-limiting performed by Limitador because it can go over limits while the updates of the counters are being replicated.
The impact of that greatly depends on the use case. With limits of a few seconds, and a low number of hits, we could easily go over limits. On the other hand, if we have defined limits with a high number of hits and a long period, the effect will be basically negligible. For example, if we define a limit of one hour, and we know that the data takes around one second to be replicated, the accuracy loss is going to be negligible.
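As a rough rule of thumb, the worst-case overshoot is on the order of the request rate multiplied by the replication lag: e.g. with a one-second lag and 100 hits per second spread across instances, a counter could exceed its limit by up to about 100 hits before the instances converge, which is dramatic for a limit of 10 per minute but noise for a limit of 100,000 per hour.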
"},{"location":"limitador/doc/topologies/#set-up","title":"Set up","text":"In order to try active-active replication, you can follow this tutorial from RedisLabs.
"},{"location":"limitador/doc/topologies/#disk","title":"Disk","text":"Disk storage using RocksDB. Counters are held on disk (persistent).
"},{"location":"limitador/doc/migrations/conditions/","title":"New condition syntax","text":"With limitador-server
version 1.0.0
(and the limitador
crate version 0.3.0
), the syntax for condition
s within limit
definitions has changed.
"},{"location":"limitador/doc/migrations/conditions/#changes","title":"Changes","text":""},{"location":"limitador/doc/migrations/conditions/#the-new-syntax","title":"The new syntax","text":"The new syntax formalizes what part of an expression is the identifier and which is the value to test against. Identifiers are simple string value, while string literals are to be demarcated by single quotes ('
) or double quotes (\"
) so that foo == \" bar\"
now makes it explicit that the value is to be prefixed with a space character.
A few remarks:
- Only
string
values are supported, as that's what they really are - There is no escape character sequence supported in string literals
- A new operator has been added,
!=
"},{"location":"limitador/doc/migrations/conditions/#the-issue-with-the-deprecated-syntax","title":"The issue with the deprecated syntax","text":"The previous syntax wouldn't differentiate between values and the identifier, so that foo == bar
was valid. In this case foo
was the identifier of the variable, while bar
was the value to evaluate it against. Whitespaces before and after the operator ==
would be equally important. SO that foo == bar
would test for a foo
variable being equal to bar
where the trailing whitespace after the identifier, and the one prefixing the value, would have been evaluated.
"},{"location":"limitador/doc/migrations/conditions/#server-binary-users","title":"Server binary users","text":"The server still allows for the deprecated syntax, but warns about its usage. You can easily migrate your limits file, using the following command:
limitador-server --validate old_limits.yaml > updated_limits.yaml\n
Which should output Deprecated syntax for conditions corrected!
to stderr
while stdout
would be the limits using the new syntax. It is recommended you manually verify the resulting LIMITS_FILE
.
"},{"location":"limitador/doc/migrations/conditions/#crate-users","title":"Crate users","text":"A feature lenient_conditions
has been added, which lets you use the syntax used in previous version of the crate. The function limitador::limit::check_deprecated_syntax_usages_and_reset()
lets you verify if the deprecated syntax has been used as limit::Limit
s are created with their condition strings using the deprecated syntax.
"},{"location":"limitador/doc/server/configuration/","title":"Limitador configuration","text":""},{"location":"limitador/doc/server/configuration/#command-line-configuration","title":"Command line configuration","text":"The preferred way of starting and configuring the Limitador server is using the command line:
Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n memory Counters are held in Limitador (ephemeral)\n disk Counters are held on disk (persistent)\n redis Uses Redis to store counters\n redis_cached Uses Redis to store counters, with an in-memory cache\n\nArguments:\n <LIMITS_FILE> The limit file to use\n\nOptions:\n -b, --rls-ip <ip>\n The IP to listen on for RLS [default: 0.0.0.0]\n -p, --rls-port <port>\n The port to listen on for RLS [default: 8081]\n -B, --http-ip <http_ip>\n The IP to listen on for HTTP [default: 0.0.0.0]\n -P, --http-port <http_port>\n The port to listen on for HTTP [default: 8080]\n -l, --limit-name-in-labels\n Include the Limit Name in prometheus label\n -v...\n Sets the level of verbosity\n --tracing-endpoint <tracing_endpoint>\n The endpoint for the tracing service\n --validate\n Validates the LIMITS_FILE and exits\n -H, --rate-limit-headers <rate_limit_headers>\n Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n --grpc-reflection-service\n Enables gRPC server reflection service\n -h, --help\n Print help\n -V, --version\n Print version\n
The values used are authoritative over any environment variables independently set.
"},{"location":"limitador/doc/server/configuration/#limit-definitions","title":"Limit definitions","text":"The LIMITS_FILE
provided is the source of truth for all the limits that will be enforced. The server monitors the file location for changes and hot-reloads it. Invalid changes are ignored on hot reload; if the file is invalid at startup, the server will fail to start.
"},{"location":"limitador/doc/server/configuration/#the-limits_files-format","title":"The LIMITS_FILE
's format","text":"When starting the server, you point it to a LIMITS_FILE
, which is expected to be a yaml file with an array of limit
definitions, with the following format:
---\n\"$schema\": http://json-schema.org/draft-04/schema#\ntype: object\nproperties:\n name:\n type: string\n namespace:\n type: string\n seconds:\n type: integer\n max_value:\n type: integer\n conditions:\n type: array\n items:\n\n - type: string\n variables:\n type: array\n items:\n - type: string\nrequired:\n - namespace\n - seconds\n - max_value\n - conditions\n - variables\n
Here is an example of such a limit definition:
namespace: example.org\nmax_value: 10\nseconds: 60\nconditions:\n\n - \"req.method == 'GET'\"\nvariables:\n - user_id\n
- namespace namespaces the limit, and will generally be the domain, see here
- seconds is the duration for which the limit applies, in seconds: e.g. 60 is a span of time of one minute
- max_value is the actual limit, e.g. 100 would limit to 100 requests
- name lets the user optionally name the limit
- variables is an array of variables which, once resolved, will be used to qualify counters for the limit, e.g. api_key to limit per API key
- conditions is an array of conditions which, once evaluated, will decide whether or not to apply the limit
"},{"location":"limitador/doc/server/configuration/#condition-syntax","title":"condition
syntax","text":"Each condition
is an expression producing a boolean value (true
or false
). All conditions
must evaluate to true
for the limit
to be applied on a request.
Expressions follow the following syntax: $IDENTIFIER $OP $STRING_LITERAL
, where:
- $IDENTIFIER will be used to resolve the value at evaluation time, e.g. role
- $OP is an operator, either == or !=
- $STRING_LITERAL is a literal string value, demarcated by \" or ', e.g. \"admin\"
So that role != \"admin\" would apply the limit to requests from all users except admin's.
"},{"location":"limitador/doc/server/configuration/#counter-storages","title":"Counter storages","text":"Limitador will load all the limit
definitions from the LIMITS_FILE
and keep these in memory. To enforce these limits, Limitador needs to track requests in the form of counters. There is at least one counter per limit, but that number grows when variables
are used to qualify counters by arbitrary values.
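For instance, a limit qualified by a user_id variable (an illustrative name) keeps one counter per distinct user_id seen during each period, so 1,000 active users can mean up to 1,000 live counters for that single limit:
namespace: example.org\nmax_value: 10\nseconds: 60\nconditions:\n  - \"req.method == 'GET'\"\nvariables:\n  - user_id\n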
"},{"location":"limitador/doc/server/configuration/#memory","title":"memory
","text":"As the name implies, Limitador will keep all counters in memory. This yields the best results in terms of latency as well as accuracy. By default, only up to 1000
\"concurrent\" counters will be kept around, evicting the oldest entries. \"Concurrent\" in this context means counters that need to exist at the \"same time\", based of the period of the limit, as \"expired\" counters are discarded.
This storage is ephemeral, as if the process is restarted, all the counters are lost and effectively \"reset\" all the limits as if no traffic had been rate limited, which can be fine for short-lived limits, less for longer-lived ones.
"},{"location":"limitador/doc/server/configuration/#redis","title":"redis
","text":"When you want persistence of your counters, such as for disaster recovery or across restarts, using redis
will store the counters in a redis instance using the provided URL
. Increments to individual counters are made within Redis itself, which keeps them accurate, though races can occur when multiple Limitador servers are used against a single Redis with \"stacked\" limits (i.e. over different periods). Latency is also impacted, as it results in one additional hop to talk to Redis and maintain the counters.
TLS Support
Connect to a redis instance using the rediss://
URL scheme.
To enable insecure mode, append #insecure
at the end of the URL. For example:
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\n
Authentication
To enable authentication, use the username and password properties of the URL scheme. For example:
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\n
When the username is omitted, Redis assumes the default
user. For example:
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\n
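Combining both (the host and credentials below are hypothetical), a TLS connection authenticated as the default user would look like:
limitador-server <LIMITS_FILE> redis rediss://default:my-password@redis.example.com:6380\n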
Usage
Uses Redis to store counters\n\nUsage: limitador-server <LIMITS_FILE> redis <URL>\n\nArguments:\n <URL> Redis URL to use\n\nOptions:\n -h, --help Print help\n
"},{"location":"limitador/doc/server/configuration/#redis_cached","title":"redis_cached
","text":"In order to avoid some communication overhead to redis, redis_cached
adds an in-memory caching layer within the Limitador servers. This lowers the latency, but sacrifices some accuracy: it not only caches counters, but also coalesces counter updates to Redis over time. See this configuration option for more information.
TLS Support
Connect to a redis instance using the rediss://
URL scheme.
To enable insecure mode, append #insecure
at the end of the URL. For example:
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\n
Authentication
To enable authentication, use the username and password properties of the URL scheme. For example:
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\n
When the username is omitted, Redis assumes the default
user. For example:
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\n
Usage
Uses Redis to store counters, with an in-memory cache\n\nUsage: limitador-server <LIMITS_FILE> redis_cached [OPTIONS] <URL>\n\nArguments:\n <URL> Redis URL to use\n\nOptions:\n --batch-size <batch> Size of entries to flush in a single flush [default: 100]\n --flush-period <flush> Flushing period for counters in milliseconds [default: 1000]\n --max-cached <max> Maximum amount of counters cached [default: 10000]\n --response-timeout <timeout> Timeout for Redis commands in milliseconds [default: 350]\n -h, --help Print help\n
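For example, to trade a little more accuracy for fewer Redis round trips (the values are illustrative), you could flush larger batches less often:
limitador-server <LIMITS_FILE> redis_cached --batch-size 500 --flush-period 2000 redis://127.0.0.1\n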
"},{"location":"limitador/doc/server/configuration/#disk","title":"disk
","text":"Disk storage using RocksDB. Counters are held on disk (persistent).
Counters are held on disk (persistent)\n\nUsage: limitador-server <LIMITS_FILE> disk [OPTIONS] <PATH>\n\nArguments:\n <PATH> Path to counter DB\n\nOptions:\n --optimize <OPTIMIZE> Optimizes either to save disk space or higher throughput [default: throughput] [possible values: throughput, disk]\n -h, --help Print help\n
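For example (the path below is hypothetical), to favor disk space over throughput:
limitador-server <LIMITS_FILE> disk --optimize disk /var/lib/limitador/counters\n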
For in-depth coverage of the different topologies supported and how they affect behavior, see the topologies document.
"},{"location":"limitador/doc/server/configuration/#configuration-using-environment-variables","title":"Configuration using environment variables","text":"The Limitador server has some options that can be configured with environment variables. These will override the default values the server uses. Any argument used when starting the server will prevail over the environment variables.
"},{"location":"limitador/doc/server/configuration/#envoy_rls_host","title":"ENVOY_RLS_HOST
","text":" - Host where the Envoy RLS server listens.
- Optional. Defaults to
\"0.0.0.0\"
. - Format:
string
.
"},{"location":"limitador/doc/server/configuration/#envoy_rls_port","title":"ENVOY_RLS_PORT
","text":" - Port where the Envoy RLS server listens.
- Optional. Defaults to
8081
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#http_api_host","title":"HTTP_API_HOST
","text":" - Host where the HTTP server listens.
- Optional. Defaults to
\"0.0.0.0\"
. - Format:
string
.
"},{"location":"limitador/doc/server/configuration/#http_api_port","title":"HTTP_API_PORT
","text":" - Port where the HTTP API listens.
- Optional. Defaults to
8080
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#limits_file","title":"LIMITS_FILE
","text":" - YAML file that contains the limits to create when Limitador boots. If the limits specified already have counters associated, Limitador will not delete them. Changes to the file will be picked up by the running server.
- Required. No default
- Format:
string
, file path.
"},{"location":"limitador/doc/server/configuration/#limit_name_in_prometheus_labels","title":"LIMIT_NAME_IN_PROMETHEUS_LABELS
","text":" - Enables using limit names as labels in Prometheus metrics. This is disabled by default because for a few limits it should be fine, but it could become a problem when defining lots of limits. See the caution note in the Prometheus docs
- Optional. Disabled by default.
- Format:
bool
, set to \"1\"
to enable.
"},{"location":"limitador/doc/server/configuration/#tracing_endpoint","title":"TRACING_ENDPOINT
","text":" - The endpoint of the OTLP tracing collector (scheme://host:port).
- Optional. Default to
\"\"
(tracing disabled) - Format:
string
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_enabled","title":"REDIS_LOCAL_CACHE_ENABLED
","text":" - Enables a storage implementation that uses Redis, but also caches some data in memory. The idea is to improve throughput and latencies by caching the counters in memory to reduce the number of accesses to Redis. To achieve that, this mode sacrifices some rate-limit accuracy. This mode does two things:
- Batches counter updates. Instead of updating the counters on every request, it updates them in memory and commits them to Redis in batches. The flushing interval can be configured with the
REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS
env. The trade-off is that when running several instances of Limitador, other instances will not become aware of the counter updates until they're committed to Redis. - Caches counters. Instead of fetching the value of a counter every time it's needed, the value is cached for a configurable period. The trade-off is that when running several instances of Limitador, an instance will not become aware of the counter updates other instances do while the value is cached. When a counter is already at 0 (limit exceeded), it's cached until it expires in Redis. In this case, no matter what other instances do, we know that the quota will not be reestablished until the key expires in Redis, so in this case, rate-limit accuracy is not affected. When a counter has still some quota remaining the situation is different, that's why we can tune for how long it will be cached. The formula is as follows: MIN(ttl_in_redis/
REDIS_LOCAL_CACHE_TTL_RATIO_CACHED_COUNTERS
, REDIS_LOCAL_CACHE_MAX_TTL_CACHED_COUNTERS_MS
). For example, let's imagine that the current TTL (time remaining until the limit resets) in Redis for a counter is 10 seconds, we set the ratio to 2, and the max time to 30s. In this case, the counter will be cached for 5s (min(10/2, 30)). During those 5s, Limitador will not fetch the value of that counter from Redis, so it will answer faster, but it will also miss the updates done by other instances, so it can go over the limits in that 5s interval.
- Optional. Disabled by default.
- Format: set to \"1\" to enable.
- Note: \"REDIS_URL\" needs to be set.
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_flushing_period_ms","title":"REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS
","text":" - Used to configure the maximum flushing period. See
REDIS_LOCAL_CACHE_ENABLED
. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1
. - Optional. Defaults to
1000
. - Format:
integer
. Duration in milliseconds.
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_batch_size","title":"REDIS_LOCAL_CACHE_BATCH_SIZE
","text":" - Used to configure the maximum number of counters to update in a flush. See
REDIS_LOCAL_CACHE_ENABLED
. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1
. - Optional. Defaults to
100
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#redis_url","title":"REDIS_URL
","text":" - Redis URL. Required only when you want to use Redis to store the limits.
- Optional. By default, Limitador stores the limits in memory and does not require Redis.
- Format:
string
, URL in the format of \"redis://127.0.0.1:6379\"
.
"},{"location":"limitador/doc/server/configuration/#rust_log","title":"RUST_LOG
","text":" - Defines the log level.
- Optional. Defaults to
\"error\"
. - Format:
enum
: \"debug\"
, \"error\"
, \"info\"
, \"warn\"
, or \"trace\"
.
"},{"location":"limitador/doc/server/configuration/#rate_limit_headers","title":"RATE_LIMIT_HEADERS
","text":" - Enables rate limit response headers. Only supported by the RLS server.
- Optional. Defaults to
\"NONE\"
. - Must be one of:
\"NONE\"
- Does not add any additional headers to the http response. \"DRAFT_VERSION_03\"
- Adds response headers per https://datatracker.ietf.org/doc/id/draft-polli-ratelimit-headers-03.html
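For example (a sketch; the limits file path is illustrative), enabling the draft headers via the environment:
RATE_LIMIT_HEADERS=\"DRAFT_VERSION_03\" limitador-server limits.yaml\n
Responses can then carry the RateLimit-Limit, RateLimit-Remaining and RateLimit-Reset fields described in the draft.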
"},{"location":"limitador/limitador/","title":"Limitador (library)","text":"An embeddable rate-limiter library supporting in-memory, Redis and disk data stores.
For the complete documentation of the crate's API, please refer to docs.rs
"},{"location":"limitador/limitador/#features","title":"Features","text":" redis_storage
: support for using Redis as the data storage backend. disk_storage
: support for using RocksDB as a local disk storage backend. lenient_conditions
: support for the deprecated syntax of Condition
s default
: redis_storage
.
"},{"location":"limitador/limitador-server/","title":"Limitador (server)","text":"By default, Limitador starts the HTTP server in localhost:8080
, and the grpc service that implements the Envoy Rate Limit protocol in localhost:8081
. That can be configured with these ENVs: ENVOY_RLS_HOST
, ENVOY_RLS_PORT
, HTTP_API_HOST
, and HTTP_API_PORT
.
Or using the command line arguments:
Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n memory Counters are held in Limitador (ephemeral)\n disk Counters are held on disk (persistent)\n redis Uses Redis to store counters\n redis_cached Uses Redis to store counters, with an in-memory cache\n\nArguments:\n <LIMITS_FILE> The limit file to use\n\nOptions:\n -b, --rls-ip <ip>\n The IP to listen on for RLS [default: 0.0.0.0]\n -p, --rls-port <port>\n The port to listen on for RLS [default: 8081]\n -B, --http-ip <http_ip>\n The IP to listen on for HTTP [default: 0.0.0.0]\n -P, --http-port <http_port>\n The port to listen on for HTTP [default: 8080]\n -l, --limit-name-in-labels\n Include the Limit Name in prometheus label\n -v...\n Sets the level of verbosity\n --tracing-endpoint <tracing_endpoint>\n The endpoint for the tracing service\n --validate\n Validates the LIMITS_FILE and exits\n -H, --rate-limit-headers <rate_limit_headers>\n Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n -h, --help\n Print help\n -V, --version\n Print version\n
When using environment variables, these will override the defaults. While environment variable are themselves overridden by the command line arguments provided. See the individual STORAGES
help for more options relative to each of the storages.
The OpenAPI spec of the HTTP service is here.
Limitador has to be started with a YAML file that has some limits defined. There's an example file that allows 10 requests per minute and per user_id
when the HTTP method is \"GET\"
and 5 when it is a \"POST\"
. You can run it with Docker (replace latest
with the version you want):
docker run --rm --net=host -it -v $(pwd)/examples/limits.yaml:/home/limitador/my_limits.yaml:ro quay.io/kuadrant/limitador:latest limitador-server /home/limitador/my_limits.yaml\n
You can also use the YAML file when running locally:
cargo run --release --bin limitador-server ./examples/limits.yaml\n
If you want to use Limitador with Envoy, there's a minimal Envoy config for testing purposes here. The config forwards the \"userid\" header and the request method to Limitador. It assumes that there's an upstream API deployed on port 1323. You can use echo, for example.
Limitador has several options that can be configured via ENV. This doc specifies them.
"},{"location":"limitador/limitador-server/#limits-storage","title":"Limits storage","text":"Limitador can store its limits and counters in-memory, disk or in Redis. In-memory is faster, but the limits are applied per instance. When using Redis, multiple instances of Limitador can share the same limits, but it's slower.
"},{"location":"limitador/limitador-server/kubernetes/","title":"Kubernetes","text":"The purpose of this documentation is to deploy a sample application published via AWS ELB, that will be ratelimited at infrastructure level, thanks to the use the envoyproxy sidecar container, that will be in charge of contacting to a ratelimit service (limitador), that will allow the request (or not) if it is within the permitted limits.
There are mainly two recommended way of using limitador in kubernetes:
- There is an ingress based on envoyproxy that contacts with limitador ratelimit service before forwarding (or not) the request to the application
- There is an envoyproxy sidecar container living in the application pod that contacts with limitador ratelimit service before forwarding (or not) the request to the main application container in the same pod
This example describes the second scenario: an application with an envoyproxy sidecar container that contacts the limitador service.
NOTE If you don't want to manually manage the sidecar container definitions on your deployments (hardcoding the container spec, loading the envoy configuration from a configmap that requires a pod restart to reload possible configuration changes...), you can use marin3r, a lightweight envoy control plane that allows you to inject envoyproxy sidecar containers and dynamically consume configs from Kubernetes custom resources.
This is the network diagram of the deployed example:
"},{"location":"limitador/limitador-server/kubernetes/#components","title":"Components","text":"In order to that that ratelimit test, you need to deploy a few components. Some of them are mandatory, and a few are optional:
"},{"location":"limitador/limitador-server/kubernetes/#mandatory","title":"Mandatory","text":" - Application (a sample application deployment called
kuard
): - App has an
envoyproxy
sidecar container with its configuration file in a configmap, composed by: - Cluster
kuard
points to main application container (127.0.0.1:8080
) - Cluster
kuard_ratelimit
points to limitador headless service (limitador:8081
) - Listener HTTP points to envoyproxy sidecar (
0.0.0.0:38080
) - When envoy contacts the ratelimit service, you can define a timeout; if there is no response within that timeout (because the ratelimit service is overloaded and taking longer to process the request, or because it is down), you can choose whether envoy denies the request or passes it to the application. In this case, a 1s timeout is set, and if there is no answer within that second, the request is passed to the application (
failure_mode_deny: false
), so we guarantee that the maximum overhead added by a non-working ratelimit service is 1 extra second on the final response time.
-
App service published with type: LoadBalancer
, which creates an AWS ELB. This service has an annotation to enable proxy protocol on the AWS load balancer in order to keep the real client IP at the envoy level (instead of the k8s node private IP), so it can be used to rate limit per real client IP if desired.
-
Ratelimit application (a deployment called limitador
):
- Limitador Configmap with limits definition (1000 rps per hostname).
-
Limitador headless service published on limitador:8081
. It is important to use a headless service in order to balance the traffic correctly between limitador pods; otherwise gRPC connections are not well balanced.
-
Redis database to persist ratelimit configuration:
- Redis service
- Redis statefulset with a persistent volume
"},{"location":"limitador/limitador-server/kubernetes/#optional","title":"Optional","text":" - Centos pod:
- Used to execute
hey
tool benchmarks from the cluster, ensuring network latency does not affect the results. To achieve better results, this pod should actually be on another cluster (so that client and server do not share the network) placed in the same Region (to reduce latency); otherwise the client could be a bottleneck for the performance test. - This centos pod goes through the public AWS ELB to access the app, simulating a normal client from the same Region
- Prometheus monitoring and grafana dashboard resources
"},{"location":"limitador/limitador-server/kubernetes/#k8s-deployment","title":"K8s deployment","text":" -
Deploy the redis instance that will keep the limits for different limitador pods:
kubectl apply -f redis-service.yaml\nkubectl apply -f redis-statefulset.yaml\n
-
Deploy the limitador application. It is important to create the configmap with limitador limits before the deployment, so it can be loaded by the limitador pods. At the moment, if you update the limits configmap you need to restart the pods. Additionally, limitador has an API to load limits dynamically, but for simplicity a configmap has been used for this test:
kubectl apply -f limitador-config-configmap.yaml\nkubectl apply -f limitador-service.yaml\nkubectl apply -f limitador-deployment.yaml\n
-
Deploy sample kuard application with the envoyproxy sidecar container (if you do any change on the envoy configmap, remember you need to restart app pods in order to reload the config):
kubectl apply -f kuard-envoy-config-configmap.yaml\nkubectl apply -f kuard-service.yaml\nkubectl apply -f kuard-deployment.yaml\n
-
At this point you should see all pods running, and kuard pods should have 2 containers (the main kuard container, and the envoyproxy sidecar container):
\u25b6 kubectl get pods\nNAME READY STATUS RESTARTS AGE\nkuard-f859bb896-gmzxn 2/2 Running 0 7m\nkuard-f859bb896-z95w8 2/2 Running 0 7m\nlimitador-68d494f54c-qv996 1/1 Running 0 8m\nlimitador-68d494f54c-zzmhn 1/1 Running 0 8m\nredis-0 1/1 Running 0 9m\n
-
Now you should be able to access the kuard application using the load balancer DNS name:
\u25b6 kubectl get service kuard\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nkuard LoadBalancer 172.30.117.198 a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com 80:31450/TCP 4m\n
-
If you go to the browser and paste the EXTERNAL-IP
, your request will follow this workflow:
- The request will go from your local machine through the internet to the public AWS ELB where the app is published
- Then it will go to the
NodePort
of your k8s cluster nodes - Once on a k8s node, it will go to kuard
Service
Virtual IP, and will arrive at an envoyproxy sidecar container inside the kuard pod - The envoyproxy sidecar container will contact the limitador headless
Service
, to authorize the request or not: - If the request is authorized (within the configured limits), envoy will send the request to the app container (
0.0.0.0:8080
) in the same pod, and the request will end up with an HTTP 200
response - If the request is limited (beyond the limits), the request will end up with an
HTTP 429
response
"},{"location":"limitador/limitador-server/kubernetes/#monitoring","title":"Monitoring","text":"Both envoyproxy
sidecar and limitador
applications include built-in prometheus metrics.
"},{"location":"limitador/limitador-server/kubernetes/#prometheus","title":"Prometheus","text":"In order to scrape that metrics within a prometheus-operator deployed in the cluster, you need to create a PodMonitor
resource for every application:
kubectl apply -f kuard-podmonitor.yaml\nkubectl apply -f limitador-podmonitor.yaml\n
"},{"location":"limitador/limitador-server/kubernetes/#grafana-dashboard","title":"Grafana dashboard","text":"Then, if you have grafana deployed in the cluster, you can import a Kuadrant Limitador grafana dashboard that we have prepared, which includes:
- Kuard envoyproxy sidecar metrics (globally and per pod)
- Limitador metrics (globally and per pod)
- And for every deployed component (limitador, kuard, redis):
- Number of pods (total, available, unavailable, pod restarts...)
- CPU usage per pod
- Memory usage per pod
- Network usage per pod
"},{"location":"limitador/limitador-server/kubernetes/#benchmarking","title":"Benchmarking","text":" - In order to check that the ratelimit is working as expected, you can use any benchmarking tool, like hey
- If you want, you can use a centos pod (better to create it on a different cluster within the same Region):
kubectl apply -f centos-pod.yaml\n
- Connect to centos pod:
kubectl exec --stdin --tty centos -- /bin/bash\n
- And install
hey
with: [root@centos /]# curl -sf https://gobinaries.com/rakyll/hey | sh\n
- Now you can execute the benchmark using the following scenario:
- Target: AWS ELB DNS Name
- App pods: 2
- Limitador pods: 2
- Limits: 1,000 rps per hostname
- Hey duration: 1 minute
- Hey traffic: -c 60 -q 20 (around 1,200 rps)
- Theoretically:
- It should let 1,000 requests pass, and limit 200 requests, per second
- It should let 60 * 1,000 = 60,000 requests pass, and limit 60 * 200 = 12,000 requests, per minute
- Each limitador pod should handle half of the traffic (500 rps OK, and 100 rps limited)
[root@centos /]# hey -z 60s -c 60 -q 20 \"http://a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com\"\n\nSummary:\n Total: 60.0131 secs\n Slowest: 0.1028 secs\n Fastest: 0.0023 secs\n Average: 0.0075 secs\n Requests/sec: 1199.3721\n\n Total data: 106581650 bytes\n Size/request: 1480 bytes\n\nResponse time histogram:\n 0.002 [1] |\n 0.012 [70626] |\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\n 0.022 [1291] |\u25a0\n 0.032 [0] |\n 0.043 [0] |\n 0.053 [0] |\n 0.063 [0] |\n 0.073 [0] |\n 0.083 [0] |\n 0.093 [23] |\n 0.103 [37] |\n\n\nLatency distribution:\n 10% in 0.0053 secs\n 25% in 0.0063 secs\n 50% in 0.0073 secs\n 75% in 0.0085 secs\n 90% in 0.0096 secs\n 95% in 0.0102 secs\n 99% in 0.0139 secs\n\nDetails (average, fastest, slowest):\n DNS+dialup: 0.0001 secs, 0.0023 secs, 0.1028 secs\n DNS-lookup: 0.0001 secs, 0.0000 secs, 0.0711 secs\n req write: 0.0000 secs, 0.0000 secs, 0.0014 secs\n resp wait: 0.0074 secs, 0.0023 secs, 0.0303 secs\n resp read: 0.0000 secs, 0.0000 secs, 0.0049 secs\n\nStatus code distribution:\n [200] 60046 responses\n [429] 11932 responses\n
-
We can see that:
- The client could send 1199.3721 rps (about 1,200 rps)
- 60046 requests (about 60,000) were OK (HTTP 200)
- 11932 requests (about 12,000) were limited (HTTP 429)
- Average latency (from the request leaving the client, through the AWS ELB, a k8s node, the envoyproxy container, and limitador+redis, to the kuard app container) is 7.5ms
-
In addition, if we run a longer test, with 5 minutes of traffic for example, you can check with the grafana dashboard how these requests are processed by the envoyproxy sidecar containers of the kuard pods and by the limitador pods:
- Kuard Envoyproxy Sidecar Metrics:
- Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
- Each envoyproxy sidecar of each kuard pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is not 100% perfect, caused by random iptables forwarding when using a k8s service
- Limitador Metrics:
- Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
- Each limitador pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is perfect thanks to using a headless service with GRPC connections
"},{"location":"limitador/limitador-server/sandbox/","title":"Sandbox","text":""},{"location":"limitador/limitador-server/sandbox/#testing-environment","title":"Testing Environment","text":""},{"location":"limitador/limitador-server/sandbox/#requirements","title":"Requirements","text":" - docker v24+
"},{"location":"limitador/limitador-server/sandbox/#setup","title":"Setup","text":"Clone the project
git clone https://github.com/Kuadrant/limitador.git\ncd limitador/limitador-server/sandbox\n
Check out make help
for all the targets.
"},{"location":"limitador/limitador-server/sandbox/#deployment-options","title":"Deployment options","text":"Limitador's configuration Command Info In-memory configuration make deploy-in-memory
Counters are held in Limitador (ephemeral) Redis make deploy-redis
Uses Redis to store counters Redis Secured make deploy-redis-tls
Uses Redis with TLS and password protected to store counters Redis Cached make deploy-redis-cached
Uses Redis to store counters, with an in-memory cache Redis Otel Instrumented make deploy-redis-otel
Uses redis to store counters, instrumented with opentelemetry Disk make deploy-disk
Uses disk to store counters"},{"location":"limitador/limitador-server/sandbox/#limitadors-admin-http-endpoint","title":"Limitador's admin HTTP endpoint","text":"Limits
curl -i http://127.0.0.1:18080/limits/test_namespace\n
Counters
curl -i http://127.0.0.1:18080/counters/test_namespace\n
Metrics
curl -i http://127.0.0.1:18080/metrics\n
"},{"location":"limitador/limitador-server/sandbox/#limitadors-grpc-ratelimitservice-endpoint","title":"Limitador's GRPC RateLimitService endpoint","text":"Get grpcurl
. You need the Go SDK installed.
Golang version >= 1.18 (from fullstorydev/grpcurl)
make grpcurl\n
Inspect RateLimitService
GRPC service
bin/grpcurl -plaintext 127.0.0.1:18081 describe envoy.service.ratelimit.v3.RateLimitService\n
Make a custom request
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
Do repeated requests. As the limit is set to a max of 5 requests per 60 seconds, you should see an OVER_LIMIT
response after 5 requests.
while :; do bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM; sleep 1; done\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
"},{"location":"limitador/limitador-server/sandbox/#downstream-traffic","title":"Downstream traffic","text":"Upstream service implemented by httpbin.org
curl -i -H \"Host: example.com\" http://127.0.0.1:18000/get\n
"},{"location":"limitador/limitador-server/sandbox/#limitador-image","title":"Limitador Image","text":"By default, the sandbox will run Limitador's limitador-testing:latest
image.
Building limitador-testing:latest
image
You can easily build Limitador's image from the current workspace code base with:
make build\n
The image will be tagged with limitador-testing:latest
Using custom Limitador's image
The LIMITADOR_IMAGE
environment variable overrides the default image. For example:
make deploy-in-memory LIMITADOR_IMAGE=quay.io/kuadrant/limitador:latest\n
"},{"location":"limitador/limitador-server/sandbox/#clean-env","title":"Clean env","text":"make clean\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/","title":"Limitador instrumentation sandbox","text":"Limitador is configured to push traces to an opentelemetry collector.
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-sandbox","title":"Run sandbox","text":"make build\nmake deploy-redis-otel\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-some-traffic","title":"Run some traffic","text":"make grpcurl\n
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#see-the-trace-in-ui","title":"See the trace in UI","text":"firefox -private-window \"localhost:16686\"\n
Recommended to start looking at check_and_update
operation.
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#tear-down-sandbox","title":"Tear down sandbox","text":"make clean\n
"},{"location":"limitador/limitador-server/sandbox/redis-tls/","title":"Index","text":""},{"location":"limitador/limitador-server/sandbox/redis-tls/#testing-redis-security","title":"Testing redis security","text":"Execute bash shell in redis pod
docker compose -p sandbox exec redis /bin/bash\n
Connect to this Redis server with redis-cli:
root@e024a29b74ba:/data# redis-cli --tls --cacert /usr/local/etc/redis/certs/ca.crt -a foobared\n
"},{"location":"limitador-operator/","title":"Limitador Operator","text":""},{"location":"limitador-operator/#overview","title":"Overview","text":"The Operator to manage Limitador deployments.
"},{"location":"limitador-operator/#customresourcedefinitions","title":"CustomResourceDefinitions","text":" - Limitador, which defines a desired Limitador deployment.
"},{"location":"limitador-operator/#limitador-crd","title":"Limitador CRD","text":"Limitador v1alpha1 API reference
Example:
---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n
"},{"location":"limitador-operator/#features","title":"Features","text":" - Storage Options
- Rate Limit Headers
- Logging
- Tracing
"},{"location":"limitador-operator/#contributing","title":"Contributing","text":"The Development guide describes how to build the operator and how to test your changes before submitting a patch or opening a PR.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"limitador-operator/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"limitador-operator/doc/development/","title":"Development Guide","text":""},{"location":"limitador-operator/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - operator-sdk version 1.32.0
- kind version v0.22.0
- git
- go version 1.21+
- kubernetes version v1.25+
- kubectl version v1.25+
"},{"location":"limitador-operator/doc/development/#build","title":"Build","text":"make\n
"},{"location":"limitador-operator/doc/development/#run-locally","title":"Run locally","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind with local-env-setup
.
make local-env-setup\n
Then, run the operator locally
make run\n
"},{"location":"limitador-operator/doc/development/#deploy-the-operator-in-a-deployment-object","title":"Deploy the operator in a deployment object","text":"make local-setup\n
"},{"location":"limitador-operator/doc/development/#deploy-the-operator-using-olm","title":"Deploy the operator using OLM","text":"You can deploy the operator using OLM just running a few commands. No need to build any image. Kuadrant engineering team provides latest
and released version tagged images. They are available in the Quay.io/Kuadrant image repository.
Create kind cluster
make kind-create-cluster\n
Deploy OLM system
make install-olm\n
Deploy the operator using OLM. The make deploy-catalog
target accepts the following variables:
Makefile Variable Description Default value CATALOG_IMG
Catalog image URL quay.io/kuadrant/limitador-operator-catalog:latest
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
"},{"location":"limitador-operator/doc/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"If you want to deploy (using OLM) a custom limitador operator, you need to build your own catalog.
"},{"location":"limitador-operator/doc/development/#build-operator-bundle-image","title":"Build operator bundle image","text":"The make bundle
target accepts the following variables:
Makefile Variable Description Default value Notes IMG
Operator image URL quay.io/kuadrant/limitador-operator:latest
VERSION
Bundle version 0.0.0
RELATED_IMAGE_LIMITADOR
Limitador bundle URL quay.io/kuadrant/limitador:latest
LIMITADOR_VERSION
var could be use to build this URL providing the tag - Build the bundle manifests
make bundle [IMG=quay.io/kuadrant/limitador-operator:latest] \\\n [VERSION=0.0.0] \\\n [RELATED_IMAGE_LIMITADOR=quay.io/kuadrant/limitador:latest]\n
- Build the bundle image from the manifests
Makefile Variable Description Default value BUNDLE_IMG
Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
- Push the bundle image to a registry
Makefile Variable Description Default value BUNDLE_IMG
Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
"},{"location":"limitador-operator/doc/development/#build-custom-catalog","title":"Build custom catalog","text":"The catalog format will be File-based Catalog.
Make sure all the required bundles are pushed to the registry. It is required by the opm
tool.
The make catalog
target accepts the following variables:
Makefile Variable Description Default value BUNDLE_IMG
Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest
REPLACES_VERSION
Previous operator version 0.0.0-alpha
CHANNELS
Catalog channels preview
make catalog [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [REPLACES_VERSION=0.0.0-alpha] \\\n [CHANNELS=preview]\n
- Build the catalog image from the manifests
Makefile Variable Description Default value CATALOG_IMG
Operator catalog image URL quay.io/kuadrant/limitador-operator-catalog:latest
make catalog-build [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
- Push the catalog image to a registry
make catalog-push [CATALOG_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
You can try out your custom catalog image following the steps of the Deploy the operator using OLM section.
"},{"location":"limitador-operator/doc/development/#cleaning-up","title":"Cleaning up","text":"make local-cleanup\n
"},{"location":"limitador-operator/doc/development/#run-tests","title":"Run tests","text":""},{"location":"limitador-operator/doc/development/#unittests","title":"Unittests","text":"make test-unit\n
Optionally, add TEST_NAME
makefile variable to run specific test
make test-unit TEST_NAME=TestConstants\n
or even subtest
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
"},{"location":"limitador-operator/doc/development/#integration-tests","title":"Integration tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run local cluster with kind
make local-env-setup\n
Run integration tests
make test-integration\n
"},{"location":"limitador-operator/doc/development/#all-tests","title":"All tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run local cluster with kind
make local-env-setup\n
Run all tests
make test\n
"},{"location":"limitador-operator/doc/development/#lint-tests","title":"Lint tests","text":"make run-lint\n
"},{"location":"limitador-operator/doc/development/#uninstall-limitador-crd","title":"(Un)Install Limitador CRD","text":"You need an active session open to a kubernetes cluster.
Remove CRDs
make uninstall\n
"},{"location":"limitador-operator/doc/logging/","title":"Logging","text":"The limitador operator outputs 3 levels of log messages: (from lowest to highest level)
debug
info
(default) error
info
logging is restricted to high-level information. Actions like creating, deleting or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.
Only debug
logging will include processing details.
To configure the desired log level, set the environment variable LOG_LEVEL
to one of the supported values listed above. Default log level is info
.
Apart from log level, the controller can output messages to the logs in 2 different formats:
production
(default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
development
: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}
To configure the desired log mode, set the environment variable LOG_MODE
to one of the supported values listed above. Default log mode is production
.
"},{"location":"limitador-operator/doc/rate-limit-headers/","title":"Rate Limit Headers","text":"It enables RateLimit Header Fields for HTTP as specified in Rate Limit Headers Draft
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n rateLimitHeaders: DRAFT_VERSION_03\n
Current valid values are:
- DRAFT_VERSION_03 (ref: Rate Limit Headers Draft)
- NONE
By default, when spec.rateLimitHeaders
is null, --rate-limit-headers
command line arg is not included in the limitador's deployment.
"},{"location":"limitador-operator/doc/resource-requirements/","title":"Resource Requirements","text":"The default resource requirement for Limitador deployments is specified in Limitador v1alpha1 API reference and will be applied if the resource requirement is not set in the spec.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: [] \n
Field json/yaml field Type Required Default value Description ResourceRequirements resourceRequirements
*corev1.ResourceRequirements No {\"limits\": {\"cpu\": \"500m\",\"memory\": \"64Mi\"},\"requests\": {\"cpu\": \"250m\",\"memory\": \"32Mi\"}}
Limitador deployment resource requirements"},{"location":"limitador-operator/doc/resource-requirements/#example-with-resource-limits","title":"Example with resource limits","text":"The resource requests and limits for the deployment can be set like the following:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n resourceRequirements:\n limits:\n cpu: 200m\n memory: 400Mi\n requests:\n cpu: 101m \n memory: 201Mi \n
To specify the deployment without resource requests or limits, set an empty struct {}
to the field:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [ \"get_toy == 'yes'\" ]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n resourceRequirements: {}\n
"},{"location":"limitador-operator/doc/storage/","title":"Storage","text":"Limitador limits counters are stored in a backend storage. This is In contrast to the storage of the limits themselves, which are always stored in ephemeral memory. Limitador's operator supports several storage configurations:
- In-Memory: ephemeral and cannot be shared
- Redis: Persistent (depending on the redis storage configuration) and can be shared
- Redis Cached: Persistent (depending on the redis storage configuration) and can be shared
- Disk: Persistent (depending on the underlying disk persistence capabilities) and cannot be shared
"},{"location":"limitador-operator/doc/storage/#in-memory","title":"In-Memory","text":"Counters are held in Limitador (ephemeral)
In-Memory is the default option defined by the Limitador's Operator.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage: null\n
For any of those, one should store the URL of the Redis service, inside a K8s opaque Secret.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
"},{"location":"limitador-operator/doc/storage/#redis","title":"Redis","text":"Uses Redis to store counters.
Selected when spec.storage.redis
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n
The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador
CR.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
Note: Limitador's Operator will only read the URL
field of the secret.
"},{"location":"limitador-operator/doc/storage/#redis-cached","title":"Redis Cached","text":"Uses Redis to store counters, with an in-memory cache.
Selected when spec.storage.redis-cached
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis-cached:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n
The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador
CR.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
Note: Limitador's Operator will only read the URL
field of the secret.
Additionally, caching options can be specified in the spec.storage.redis-cached.options
field.
"},{"location":"limitador-operator/doc/storage/#options","title":"Options","text":"Option Description batch-size
Size of entries to flush in as single flush [default: 100] flush-period
Flushing period for counters in milliseconds [default: 1000] max-cached
Maximum amount of counters cached [default: 10000] response-timeout
Timeout for Redis commands in milliseconds [default: 350] For example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis-cached:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n options: # Every option is optional\n batch-size: 50\n max-cached: 5000\n
"},{"location":"limitador-operator/doc/storage/#disk","title":"Disk","text":"Counters are held on disk (persistent). Kubernetes Persistent Volumes will be used to store counters.
Selected when spec.storage.disk
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk: {}\n
Additionally, disk options can be specified in the spec.storage.disk.persistentVolumeClaim
and spec.storage.disk.optimize
fields.
"},{"location":"limitador-operator/doc/storage/#persistent-volume-claim-options","title":"Persistent Volume Claim Options","text":"spec.storage.disk.persistentVolumeClaim
field is an object with the following fields.
Field Description storageClassName
StorageClass of the storage offered by cluster administrators [default: default storage class of the cluster] resources
The minimum resources the volume should have. Resources will not take any effect when VolumeName is provided. This parameter is not updateable when the underlying PV is not resizable. [default: 1Gi] volumeName
The binding reference to the existing PersistentVolume backing this claim [default: null] Example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk:\n persistentVolumeClaim:\n storageClassName: \"customClass\"\n resources:\n requests: 2Gi\n
"},{"location":"limitador-operator/doc/storage/#optimize","title":"Optimize","text":"Defines the valid optimization option of the disk persistence type.
spec.storage.disk.optimize
field is a string
type with the following valid values:
Option Description throughput
Optimizes for higher throughput. Default disk
Optimizes for disk usage Example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk:\n optimize: disk\n
"},{"location":"limitador-operator/doc/tracing/","title":"Tracing","text":"Limitador offers distributed tracing enablement using the .spec.tracing
CR configuration:
---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n verbosity: 3\n tracing:\n endpoint: rpc://my-otlp-collector:4317\n
Currently limitador only supports collectors using the OpenTelemetry Protocol with TLS disabled. The endpoint
configuration option should contain the scheme, host and port of the service. The quantity and level of the information provided by the spans is configured via the verbosity
argument.
"},{"location":"multicluster-gateway-controller/","title":"multicluster-gateway-controller","text":""},{"location":"multicluster-gateway-controller/#description","title":"Description:","text":"The multi-cluster gateway controller, leverages the gateway API standard and Open Cluster Management to provide multi-cluster connectivity and global load balancing
Key Features:
- Central Gateway Definition that can then be distributed to multiple clusters
- Automatic TLS and cert distribution for HTTPS based listeners
- DNSPolicy to decide how North-South based traffic should be balanced and reach the gateways
- Health checks to detect and take remedial action against unhealthy endpoints
- Cloud DNS provider integrations (AWS route 53) with new ones being added (google DNS)
When deploying the multicluster gateway controller using the make targets, the following will be created:
- Kind cluster(s)
- Gateway API CRDs in the control plane cluster
- Ingress controller
- Cert manager
- LetsEncrypt certs
"},{"location":"multicluster-gateway-controller/#prerequisites","title":"Prerequisites:","text":" - AWS or GCP
- Various dependencies installed into $(pwd)/bin e.g. kind, yq etc.
- Run
make dependencies
- openssl>=3
- On macOS a later version is available with
brew install openssl
. You'll need to update your PATH as macOS provides an older version via libressl as well - On Fedora use
dnf install openssl
- go >= 1.21
"},{"location":"multicluster-gateway-controller/#1-running-the-controller-in-the-cluster","title":"1. Running the controller in the cluster:","text":" -
Set up your DNS Provider by following these steps
-
Setup your local environment
make local-setup MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER>\n
-
Build the controller image and load it into the control plane sh kubectl config use-context kind-mgc-control-plane make kind-load-gateway-controller
-
Deploy the controller(s) to the control plane cluster
make deploy-gateway-controller\n
-
(Optional) View the logs of the deployed controller
kubectl logs -f $(kubectl get pods -n multi-cluster-gateways | grep \"mgc-\" | awk '{print $1}') -n multi-cluster-gateways\n
"},{"location":"multicluster-gateway-controller/#2-running-the-controller-locally","title":"2. Running the controller locally:","text":" -
Set up your DNS Provider by following these steps
-
Setup your local environment
make local-setup MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER>\n
-
Run the controller locally:
kubectl config use-context kind-mgc-control-plane \nmake build-gateway-controller run-gateway-controller\n
"},{"location":"multicluster-gateway-controller/#3-clean-up-local-environment","title":"3. Clean up local environment","text":"In any terminal window target control plane cluster by:
kubectl config use-context kind-mgc-control-plane \n
If you want to wipe everything clean consider using: make local-cleanup # Remove kind clusters created locally and cleanup any generated local files.\n
If the intention is to cleanup kind cluster and prepare them for re-installation consider using: make local-cleanup-mgc MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER> # prepares clusters for make local-setup-mgc\n
"},{"location":"multicluster-gateway-controller/#license","title":"License","text":"Copyright 2022 Red Hat.
Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0\n
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/","title":"Debugging in VS code","text":""},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#introduction","title":"Introduction","text":"The following document will show how to setup debugging for multi gateway controller.
There is an included VSCode launch.json
.
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#starting-the-controller","title":"Starting the controller","text":"Instead of starting the Gateway Controller via something like:
make build-{policy | gateway}-controller install run-{policy | gateway}-controller\n
You can now simply hit F5
in VSCode. The controller will launch with the following config:
{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug\",\n \"type\": \"go\",\n \"request\": \"launch\",\n \"mode\": \"auto\",\n \"program\": \"./cmd/controller/main.go\",\n \"args\": [\n \"--metrics-bind-address=:8080\",\n \"--health-probe-bind-address=:8081\"\n ]\n }\n ]\n}\n
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#running-debugger","title":"Running Debugger","text":""},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#debugging-tests","title":"Debugging Tests","text":""},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/","title":"Distributing Gateways with OCM","text":""},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#define-and-place-gateways","title":"Define and Place Gateways","text":"In this guide, we will go through defining a Gateway in the OCM hub cluster that can then be distributed to and instantiated on a set of managed spoke clusters.
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#prerequisites","title":"Prerequisites","text":" - Complete the Getting Started Guide to bring up a suitable environment.
If you are looking to change provider from the default Istio:
- Please have the Gateway provider of your choice installed and configured (in this example we use Envoy gateway. See the following docs)
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#initial-setup","title":"Initial setup","text":"export MGC_SUB_DOMAIN
in each terminal if you haven't already added it to your .zshrc
or .bash_profile
.
Going through the quick start above, will ensure that a supported GatewayClass
is registered in the hub cluster that the Kuadrant multi-cluster gateway controller will handle.
NOTE The quick start script will create a placement resource as part of the setup. You can use this as further inspiration for other placement resources you would like to create.
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#defining-a-gateway","title":"Defining a Gateway","text":"Once you have the Kuadrant multi-cluster gateway controller installed into the OCM hub cluster, you can begin defining and placing Gateways across your OCM managed infrastructure.
To define a Gateway and have it managed by the multi-cluster gateway controller, we need to do the following:
- Create a Gateway API Gateway resource in the hub cluster, ensuring the gateway resource specifies the correct gateway class so that it will be picked up and managed by the multi-cluster gateway controller
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: $MGC_SUB_DOMAIN\n port: 443\n protocol: HTTP\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#placing-a-gateway","title":"Placing a Gateway","text":"To place a gateway, we will need to create a Placement resource.
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: cluster.open-cluster-management.io/v1beta1\nkind: Placement\nmetadata:\n name: http-gateway-placement\n namespace: multi-cluster-gateways\nspec:\n clusterSets:\n\n - gateway-clusters # defines which ManagedClusterSet to use. \n numberOfClusters: 2 # defines how many clusters to select from the chosen clusterSets\nEOF\n
For more information on ManagedClusterSets and Placements, please see the official OCM docs:
- ManagedClusterSets
- Placements
Finally, in order to have the Gateway instances deployed to your spoke clusters so that they can start receiving traffic, you need to place the gateway.
-
To place the gateway, we need to add a placement label to the gateway resource to instruct the gateway controller where we want this gateway instantiated.
kubectl --context kind-mgc-control-plane label gateway prod-web \"cluster.open-cluster-management.io/placement\"=\"http-gateway-placement\" -n multi-cluster-gateways\n
-
To have the gateway deployed to 2 clusters, you can add a second cluster to the clusterset by running the following:
kubectl --context kind-mgc-control-plane label managedcluster kind-mgc-workload-1 ingress-cluster=true\n
As the placement specifies numberOfClusters
as 2, your gateway will automatically be instantiated on the second cluster.
-
To find the configured and instantiated gateways on the hub cluster, run the following:
kubectl --context kind-mgc-control-plane get gateway -A\n
You'll see the following:
kuadrant-multi-cluster-gateways prod-web istio 172.31.200.0 29s\nmulti-cluster-gateways prod-web kuadrant-multi-cluster-gateway-instance-per-cluster True 2m42s\n
-
Execute the following to see the gateway on the workload-1 cluster:
kubectl --context kind-mgc-workload-1 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.201.0 90s\n
While we recommend using Istio as the gateway provider, as that is how you get access to the full suite of policy APIs, it is possible to use another provider if you choose to; however, this will result in a reduced set of applicable policy objects.
If you are only using the DNSPolicy and TLSPolicy resources, you can use these APIs with any Gateway provider. To change the underlying provider, you need to set the gatewayclass param downstreamClass
.
-
Create the following ConfigMap. Note: In this example, 'eg' stands for Envoy Gateway, which is mentioned in the prerequisites above:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: v1\ndata:\n params: |\n {\n \"downstreamClass\": \"eg\"\n }\nkind: ConfigMap\nmetadata:\n name: gateway-params\n namespace: multi-cluster-gateways\nEOF\n
-
Update the gatewayclass to include the above ConfigMap:
kubectl --context kind-mgc-control-plane patch gatewayclass kuadrant-multi-cluster-gateway-instance-per-cluster -n multi-cluster-gateways --type merge --patch '{\"spec\":{\"parametersRef\":{\"group\":\"\",\"kind\":\"ConfigMap\",\"name\":\"gateway-params\",\"namespace\":\"multi-cluster-gateways\"}}}'\n
Once this has been created, any gateways created from that gateway class will result in a downstream gateway being provisioned with the configured downstreamClass. Run the following in both your hub and spoke cluster to see the gateways:
kubectl --context kind-mgc-control-plane get gateway -A\n
kubectl --context kind-mgc-workload-1 get gateway -A\n
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#using-a-different-gateway-provider","title":"Using a different gateway provider?","text":""},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/","title":"Gateway Deletion","text":""},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#gateway-deletion","title":"Gateway deletion","text":"When deleting a gateway it should ONLY be deleted in the control plane cluster. This will the trigger the following events:
"},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#workload-clusters","title":"Workload cluster(s):","text":" - The corresponding gateway in the workload clusters will also be deleted.
"},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#control-plane-clusters","title":"Control plane cluster(s):","text":" -
DNS Record deletion:
Gateways and DNS records have a strictly 1:1 relationship; when a gateway is deleted, the corresponding DNS record is also marked for deletion. This then triggers the DNS record to be removed from the managed zone in the DNS provider (currently only Route 53 is supported).
-
Certs and secrets deletion:
When a gateway is created, a cert is also created for the host in the gateway; this is removed when the gateway is deleted.
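For example, to delete the prod-web gateway used in the guides above (names assumed from those walkthroughs), run the delete against the hub context only:
kubectl --context kind-mgc-control-plane delete gateway prod-web -n multi-cluster-gateways\n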
"},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/","title":"API Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/#introduction","title":"Introduction","text":"This document will detail the setup of a reference architecture to support a number of API management use-cases connecting Kuadrant with other projects the wider API management on Kubernetes ecosystem.
"},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/#petstore-app-deployment","title":"Petstore App Deployment","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/","title":"Metrics walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#introduction","title":"Introduction","text":"This walkthrough shows how to install a metrics federation stack locally and query Istio metrics from the hub.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#video-walkthrough","title":"Video Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#installation-and-configuration-of-metrics","title":"Installation and Configuration of Metrics","text":"This document will guide you in installing metrics for your application and provide directions on where to access them. Additionally, it will include dashboards set up to display these metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#requirementsprerequisites","title":"Requirements/prerequisites","text":"Prior to commencing the metrics installation process, it is imperative that you have successfully completed the initial getting started guide. For reference, please consult the guide available at the following link: Getting Started Guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#setting-up-metrics","title":"Setting Up Metrics","text":"To establish metrics, simply execute the following script in your terminal:
curl https://raw.githubusercontent.com/kuadrant/multicluster-gateway-controller/main/hack/quickstart-metrics.sh | bash\n
This script will initiate the setup process for your metrics configuration. After the script finishes running, you should see something like:
Connect to Thanos Query UI\n URL: https://thanos-query.172.31.0.2.nip.io\n\nConnect to Grafana UI\n URL: https://grafana.172.31.0.2.nip.io\n
You can visit the Grafana dashboard by accessing the provided URL for the Grafana UI (you may need to scroll).
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#monitoring-operational-status-in-grafana-dashboard","title":"Monitoring Operational Status in Grafana Dashboard","text":"After setting up metrics, you can monitor the operational status of your system using the Grafana dashboard.
To generate traffic to the application, use curl
as follows:
while true; do curl -k https://$MGC_SUB_DOMAIN && sleep 5; done\n
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#accessing-the-thanos-ui","title":"Accessing the Thanos UI","text":" - Access the Thanos UI by clicking or entering the provided URL for the Grafana UI in your web browser.
https://thanos-query.172.31.0.2.nip.io\n
- In the Thanos UI query box, enter the below query and press 'Execute'
sum(rate(container_cpu_usage_seconds_total{namespace=\"monitoring\",container=\"prometheus\"}[5m]))\n
You should see a response in the table view. In the Graph view you should see some data over time as well. Next, query the rate of requests handled by Istio workloads:
sum(rate(istio_requests_total{}[5m])) by(destination_workload)\n
In the graph view you should see something that looks like the graph below. This shows the rate of requests (per second) for each Istio workload. In this case, there is 1 workload, balanced across 2 clusters.
To see the rate of requests per cluster (actually per pod across all clusters), the below query can be used. Over long periods of time, this graph can show traffic load balancing between application instances.
sum(rate(istio_requests_total{}[5m])) by(pod)\n
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#accessing-the-grafana-dashboard","title":"Accessing the Grafana Dashboard","text":"To view the operational metrics and status, proceed with the following steps:
- Access the Grafana dashboard by clicking or entering the provided URL for the Grafana UI in your web browser.
https://grafana.172.31.0.2.nip.io\n
Note: The default login credentials for Grafana are admin/admin. You may need to accept the non-CA signed certificate to proceed.
- Navigate to the included Grafana Dashboard
Using the left sidebar in the Grafana UI, navigate to Dashboards > Browse
and select the Istio Workload Dashboard
, MGC SRE Dashboard
or any of the following Gateway Api State
dashboards.
In Istio Workload Dashboard
you should be able to see the following layout, which will include data from the curl
command you ran in the previous section.
The MGC SRE Dashboard
displays real-time insights and visualizations of resources managed by the multicluster-gateway-controller, e.g. DNSPolicy, TLSPolicy, DNSRecord, etc.
The Gateway API State / Gateways
dashboard provides real-time insights and visualizations for Gateways. It offers information about gateway listeners, listener status, gateway status, addresses, and attached routes.
The Gateway API State / GatewayClasses
dashboard provides insights into Gateways organized by their respective Gateway Classes. It offers information about GatewayClasses and the supported features for each class.
The Gateway API State / HTTPRoutes
dashboard, like the dashboards for the remaining route kinds, focuses on the Routes themselves
and provides insights into their configuration. It displays their targeted and attached parent references, offering a detailed view of how these routes are structured and associated with their respective resources.
The Grafana dashboard will provide you with real-time insights and visualizations of your gateway's performance and metrics.
By utilizing the Grafana dashboard, you can effectively monitor the health and behavior of your system, making informed decisions based on the displayed data. This monitoring capability enables you to proactively identify and address any potential issues to ensure the smooth operation of your environment.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/","title":"Multicluster Gateways Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#introduction","title":"Introduction","text":"This document will walk you through using Open Cluster Management (OCM) and Kuadrant to configure and deploy a multi-cluster gateway.
You will also deploy a simple application that uses that gateway for ingress and protects that application's endpoints with a rate limit policy.
We will start with a hub cluster and 2 workload clusters, and highlight the automatic TLS integration as well as the automatic DNS load balancing between gateway instances.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#requirements","title":"Requirements","text":" - Complete the Getting Started - Multi Cluster Guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#initial-setup","title":"Initial Setup","text":"In this walkthrough, we'll deploy test echo services across multiple clusters. If you followed the Getting Started - Multi Cluster Guide, you would have already set up a KUADRANT_ZONE_ROOT_DOMAIN
environment variable. For this tutorial, we'll derive a host from this domain for these echo services.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#create-a-gateway","title":"Create a gateway","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#check-the-managed-zone","title":"Check the managed zone","text":" -
First let's ensure the managedzone
is present:
kubectl get managedzone -n multi-cluster-gateways --context kind-mgc-control-plane\n
You should see the following: NAME DOMAIN NAME ID RECORD COUNT NAMESERVERS READY\nmgc-dev-mz test.hcpapps.net /hostedzone/Z08224701SVEG4XHW89W0 7 [\"ns-1414.awsdns-48.org\",\"ns-1623.awsdns-10.co.uk\",\"ns-684.awsdns-21.net\",\"ns-80.awsdns-10.com\"] True\n
You are now ready to begin creating a gateway!
- We will now create a multi-cluster gateway definition in the hub cluster:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$KUADRANT_ZONE_ROOT_DOMAIN\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: apps-hcpapps-tls\n kind: Secret\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#enable-tls","title":"Enable TLS","text":" -
Create a TLSPolicy and attach it to your Gateway:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: glbc-ca \nEOF\n
-
You should now see a Certificate resource in the hub cluster:
kubectl --context kind-mgc-control-plane get certificates -A\n
You should see the following: NAMESPACE NAME READY SECRET AGE\nmulti-cluster-gateways apps-hcpapps-tls True apps-hcpapps-tls 12m\n
It is also possible to use a Let's Encrypt certificate, but for simplicity in this walkthrough we are using a self-signed cert.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#place-the-gateway","title":"Place the gateway","text":"In the hub cluster there will be a single gateway definition but no actual gateway for handling traffic yet. This is because we haven't placed the gateway yet onto any of our ingress clusters.
-
To place the gateway, we need to add a placement label to the gateway resource to instruct the gateway controller where we want this gateway instantiated:
kubectl --context kind-mgc-control-plane label gateway prod-web \"cluster.open-cluster-management.io/placement\"=\"http-gateway\" -n multi-cluster-gateways\n
-
On the hub cluster you should find there is a configured gateway:
kubectl --context kind-mgc-control-plane get gateway -A\n
you'll see the following: multi-cluster-gateways prod-web kuadrant-multi-cluster-gateway-instance-per-cluster True 2m42s\n
Later on, we will add another ingress cluster, at which point you will also see the instantiated gateway.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#enable-dns","title":"Enable DNS","text":" -
Create a DNSPolicy and attach it to your Gateway:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway \nEOF\n
Once this is done, the Kuadrant multi-cluster gateway controller will pick up when an HTTPRoute has been attached to the Gateway it is managing from the hub, and it will set up a DNS record to start bringing traffic to that gateway for the host defined in that listener.
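Later in this walkthrough, once an HTTPRoute has been attached, you can inspect the generated record on the hub with the same command used further below:
kubectl --context kind-mgc-control-plane get dnsrecord -n multi-cluster-gateways\n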
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#introducing-the-workload-clusters","title":"Introducing the workload clusters","text":"So now we have a working gateway with DNS and TLS configured. Let's place this gateway on the workload clusters and bring traffic to those gateways also.
-
We need to modify our placement to update our numberOfClusters
to 2. To patch, run:
kubectl --context kind-mgc-control-plane patch placement http-gateway -n multi-cluster-gateways --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/numberOfClusters\", \"value\": 2}]'\n
-
Run the following to see the gateway on the workload-1 cluster:
kubectl --context kind-mgc-workload-1 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.201.0 90s\n
-
Run the following to see the gateway on the workload-2 cluster:
kubectl --context kind-mgc-workload-2 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.202.0 90s\n
Additionally, you should be able to see a secret containing a self-signed certificate.
-
There should also be an associated TLS secret:
kubectl --context kind-mgc-workload-1 get secrets -n kuadrant-multi-cluster-gateways\n
you'll see the following: NAME TYPE DATA AGE\napps-hcpapps-tls kubernetes.io/tls 3 13m\n
And in the second workload cluster:
kubectl --context kind-mgc-workload-2 get secrets -n kuadrant-multi-cluster-gateways\n
you'll see the following: NAME TYPE DATA AGE\napps-hcpapps-tls kubernetes.io/tls 3 13m\n
The listener is configured to use this TLS secret also. So now our gateway has been placed and is running in the right locations with the right configuration, and TLS has been set up for the HTTPS listeners.
So now we have workload ingress clusters configured with the same Gateway.
-
Let's create the HTTPRoute in the first workload cluster. Again, remember to replace the hostname accordingly if you haven't already set a value for the KUADRANT_ZONE_ROOT_DOMAIN
variable as described in the Getting Started - Multi Cluster Guide:
kubectl --context kind-mgc-workload-1 apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n parentRefs:\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - \"echo.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: echo\n port: 8080\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: echo\nspec:\n ports:\n - name: http-port\n port: 8080\n targetPort: http-port\n protocol: TCP\n selector:\n app: echo \n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: echo\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: echo\n template:\n metadata:\n labels:\n app: echo\n spec:\n containers:\n - name: echo\n image: docker.io/jmalloc/echo-server\n ports:\n - name: http-port\n containerPort: 8080\n protocol: TCP \nEOF\n
-
Let's create the same HTTPRoute in the second workload cluster. Note the --context
references the second cluster:
kubectl --context kind-mgc-workload-2 apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n parentRefs:\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - \"echo.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: echo\n port: 8080\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: echo\nspec:\n ports:\n - name: http-port\n port: 8080\n targetPort: http-port\n protocol: TCP\n selector:\n app: echo \n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: echo\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: echo\n template:\n metadata:\n labels:\n app: echo\n spec:\n containers:\n - name: echo\n image: docker.io/jmalloc/echo-server\n ports:\n - name: http-port\n containerPort: 8080\n protocol: TCP \nEOF\n
-
If we take a look at the dnsrecord, you will see we now have two A records configured:
kubectl --context kind-mgc-control-plane get dnsrecord -n multi-cluster-gateways -o=yaml\n
-
Give DNS a minute or two to update. You should then be able to execute the following and get back the correct A record.
dig echo.$KUADRANT_ZONE_ROOT_DOMAIN\n
-
You should also be able to curl that endpoint:
curl -k https://echo.$KUADRANT_ZONE_ROOT_DOMAIN\n\n# Request served by echo-XXX-XXX\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#watching-dns-changes","title":"Watching DNS changes","text":"If you want you can use watch dig echo.$KUADRANT_ZONE_ROOT_DOMAIN
to see the DNS switching between the two addresses
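For example:
watch dig echo.$KUADRANT_ZONE_ROOT_DOMAIN\n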
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#follow-on-walkthroughs","title":"Follow-on Walkthroughs","text":"Here are some good, follow-on guides that build on this walkthrough:
- Simple RateLimitPolicy for App Developers
- Deploying/Configuring Metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/","title":"Multicluster LoadBalanced DNSPolicy","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#terms","title":"Terms","text":" GatewayAPI
: resources that model service networking in Kubernetes. Gateway
: Kubernetes Gateway resource. ManagedZone
: Kuadrant resource representing a Zone Apex in a dns provider. DNSPolicy
: Kuadrant policy for managing gateway dns. DNSRecord
: Kuadrant resource representing a set of records in a managed zone.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dns-provider-setup","title":"DNS Provider Setup","text":"A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about dns providers, and what domains these dns providers are currently hosting. This is done through the creation of ManagedZones and dns provider secrets containing the credentials for the dns provider account.
If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net
:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: echo.apps.hcpapps.net\n port: 80\n protocol: HTTP\n
In order for the DNSPolicy to act upon that listener, a ManagedZone must exist for that hostnames domain.
A secret containing the provider credentials must first be created:
kubectl create secret generic my-aws-credentials --type=kuadrant.io/aws --from-env-file=./aws-credentials.env -n multi-cluster-gateways\nkubectl get secrets my-aws-credentials -n multi-cluster-gateways -o yaml\napiVersion: v1\ndata:\n AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n AWS_REGION: <AWS_REGION>\n AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\nkind: Secret\nmetadata:\n name: my-aws-credentials\n namespace: multi-cluster-gateways\ntype: kuadrant.io/aws\n
And then a ManagedZone can be added for the desired domain referencing the provider credentials:
apiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: apps.hcpapps.net\n namespace: multi-cluster-gateways\nspec:\n domainName: apps.hcpapps.net\n description: \"apps.hcpapps.net managed domain\"\n dnsProviderSecretRef:\n name: my-aws-credentials\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dnspolicy-creation-and-attachment","title":"DNSPolicy creation and attachment","text":"Once an appropriate ManagedZone is configured for a Gateways listener hostname, we can now create and attach a DNSPolicy to start managing dns for it.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n healthCheck:\n allowInsecureCertificates: true\n additionalHeadersRef:\n name: probe-headers\n endpoint: /\n expectedResponses:\n\n - 200\n - 201\n - 301\n failureThreshold: 5\n port: 80\n protocol: http\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#target-reference","title":"Target Reference","text":"targetRef
field is taken from policy attachment's target reference API. It can only target one resource at a time. Fields included inside:
Group
is the group of the target resource. Only valid option is gateway.networking.k8s.io
. Kind
is kind of the target resource. Only valid options are Gateway
. Name
is the name of the target resource. Namespace
is the namespace of the referent. Currently only local objects can be referred so value is ignored.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#health-check","title":"Health Check","text":"The health check section is optional, the following fields are available:
allowInsecureCertificates
: Added for development environments, allows health probes to not fail when finding an invalid (e.g. self-signed) certificate. additionalHeadersRef
: A reference to a secret which contains additional headers such as an authentication token endpoint
: The path to specify for these health checks, e.g. /healthz
expectedResponses
: Defaults to 200 or 201, this allows other responses to be considered valid failureThreshold
: How many consecutive fails are required to consider this endpoint unhealthy port
: The port to connect to protocol
: The protocol to use for this connection
For more information about DNS Health Checks, see this guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#checking-status-of-health-checks","title":"Checking status of health checks","text":"To list all health checks:
kubectl get dnshealthcheckprobes -A\n
This will list all probes in the hub cluster, and whether they are currently healthy or not. To find more information on why a specific health check is failing, look at the status of that probe:
kubectl get dnshealthcheckprobe <name> -n <namespace> -o yaml\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dnsrecord-resources","title":"DNSRecord Resources","text":"The DNSPolicy will create a DNSRecord resource for each listener hostname with a suitable ManagedZone configured. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.
Given the following Gateway status:
status:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-1/172.31.201.1\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-2/172.31.202.1\n conditions:\n - lastTransitionTime: \"2023-07-24T19:09:54Z\"\n message: Handled by kuadrant.io/mgc-gw-controller\n observedGeneration: 1\n reason: Accepted\n status: \"True\"\n type: Accepted\n - lastTransitionTime: \"2023-07-24T19:09:55Z\"\n message: 'gateway placed on clusters [kind-mgc-workload-1 kind-mgc-workload-2] '\n observedGeneration: 1\n reason: Programmed\n status: \"True\"\n type: Programmed\n listeners:\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-1.api\n supportedKinds: []\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-2.api\n supportedKinds: [] \n
The example DNSPolicy shown above would create a DNSRecord like the following:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n creationTimestamp: \"2023-07-24T19:09:56Z\"\n finalizers:\n\n - kuadrant.io/dns-record\n generation: 3\n labels:\n kuadrant.io/Gateway-uid: 0877f97c-f3a6-4f30-97f4-e0d7f25cc401\n kuadrant.io/record-id: echo\n name: echo.apps.hcpapps.net\n namespace: multi-cluster-gateways\n ownerReferences:\n - apiVersion: gateway.networking.k8s.io/v1\n kind: Gateway\n name: echo-app\n uid: 0877f97c-f3a6-4f30-97f4-e0d7f25cc401\n - apiVersion: kuadrant.io/v1alpha1\n blockOwnerDeletion: true\n controller: true\n kind: ManagedZone\n name: apps.hcpapps.net\n uid: 26a06799-acff-476b-a1a3-c831fd19dcc7\n resourceVersion: \"25464\"\n uid: 365bf57f-10b4-42e8-a8e7-abb6dce93985\nspec:\n endpoints:\n - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n managedZone:\n name: apps.hcpapps.net \n
Which results in the following records being created in AWS Route53 (The provider we used in our example ManagedZone above):
The listener hostname is now be resolvable through dns:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
More information about the dns record structure can be found in the DNSRecord structure document.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#load-balancing","title":"Load Balancing","text":"Configuration of DNS Load Balancing features is done through the loadBalancing
field in the DNSPolicy spec.
loadBalancing
field contains the specification of how dns will be configured in order to provide balancing of load across multiple clusters. Fields included inside:
weighted
field describes how weighting will be applied to weighted dns records. Fields included inside: defaultWeight
arbitrary weight value that will be applied to weighted dns records by default. Integer greater than 0 and no larger than the maximum value accepted by the target dns provider. custom
array of custom weights to apply when custom attribute values match. geo
field enables the geo routing strategy. Fields included inside: defaultGeo
geo code to apply to geo dns records by default. The values accepted are determined by the target dns provider.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#weighted","title":"Weighted","text":"A DNSPolicy with an empty loadBalancing
spec, or with a loadBalancing.weighted.defaultWeight
set and nothing else produces a set of records grouped and weighted to produce a Round Robin routing strategy where all target clusters will have an equal chance of being returned in DNS queries.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 100 # <--- New Default Weight being added\n
The weight of all records is adjusted to reflect the new defaultWeight
value of 100
. This will still produce the same Round Robin routing strategy as before since all records still have equal weight values.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#custom-weights","title":"Custom Weights","text":"In order to manipulate how much traffic individual clusters receive, custom weights can be added to the DNSPolicy.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 120\n custom: # <--- New Custom Weights being added\n\n - weight: 255\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n - weight: 10\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: GCP\n
And apply custom-weight
labels to each of our managed cluster resources:
kubectl label --overwrite managedcluster kind-mgc-workload-1 kuadrant.io/lb-attribute-custom-weight=AWS\nkubectl label --overwrite managedcluster kind-mgc-workload-2 kuadrant.io/lb-attribute-custom-weight=GCP\n
The DNSRecord for our listener host gets updated, and the weighted records are adjusted to have the new values:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"10\" # <--- Weight is updated\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"255\" # <--- Weight is updated\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n
In the above scenario the managed cluster kind-mgc-workload-2
(GCP) IP address will be returned far less frequently in DNS queries than kind-mgc-workload-1
(AWS)
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#geo","title":"Geo","text":"To enable Geo Load balancing the loadBalancing.geo.defaultGeo
field should be added. This informs the DNSPolicy that we now want to start making use of Geo Location features in our target provider. This will change the single record set group created from default
(What is created for weighted only load balancing) to a geo specific one based on the value of defaultGeo
.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 120\n custom:\n\n - weight: 255\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n - weight: 10\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: GCP\n geo:\n defaultGeo: US # <--- New `geo.defaultGeo` added for `US` (United States)\n
The DNSRecord for our listener host gets updated, and the default geo is replaced with the one we specified:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net # <--- New `us` geo location CNAME is created\n providerSpecific:\n - name: geo-country-code\n value: US\n recordTTL: 300\n recordType: CNAME\n setIdentifier: US\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net # <--- Default catch all CNAME is updated to point to `us` target\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net # <--- Gateway default group is now `us`\n providerSpecific:\n - name: weight\n value: \"10\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net # <--- Gateway default group is now `us`\n providerSpecific:\n - name: weight\n value: \"255\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n
The listener hostname is still resolvable, but now routed through the us
record set:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\nus.lb-2903yb.echo.apps.hcpapps.net. # <--- `us` CNAME now in the chain\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#configuring-cluster-geo-locations","title":"Configuring Cluster Geo Locations","text":"The defaultGeo
as described above puts all clusters into the same geo group, but for geo to be useful we need to mark our clusters as being in different locations. We can do this though by adding geo-code
attributes on the ManagedCluster to show which county each cluster is in. The values that can be used are determined by the dns provider (See Below).
Apply geo-code
labels to each of our managed cluster resources:
kubectl label --overwrite managedcluster kind-mgc-workload-1 kuadrant.io/lb-attribute-geo-code=US\nkubectl label --overwrite managedcluster kind-mgc-workload-2 kuadrant.io/lb-attribute-geo-code=ES\n
The above indicates that kind-mgc-workload-1
is located in the US (United States), which is the same as our current default geo, and kind-mgc-workload-2
is in ES (Spain).
The DNSRecord for our listener host gets updated, and records are now divided into two groups, us and es:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: es.lb-2903yb.echo.apps.hcpapps.net # <--- kind-mgc-workload-2 target now added to `es` group\n providerSpecific:\n - name: weight\n value: \"10\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net # <--- New `es` geo location CNAME is created\n providerSpecific:\n - name: geo-country-code\n value: ES\n recordTTL: 300\n recordType: CNAME\n setIdentifier: ES\n targets:\n - es.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: US\n recordTTL: 300\n recordType: CNAME\n setIdentifier: US\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"255\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n
In the above scenario any requests made in Spain will be returned the IP address of kind-mgc-workload-2
and requests made from anywhere else in the world will be returned the IP address of kind-mgc-workload-1
. Weighting of records is still enforced between clusters in the same geo group, in the case above however they are having no effect since there is only one cluster in each group.
If an unsupported value is given to a provider, DNS records will not be created. Please choose carefully. For more information on what location is right for your needs please, read that provider's documentation (see links below).
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#locations-supported-per-dns-provider","title":"Locations supported per DNS provider","text":"Supported AWS GCP Continents Country codes States Regions"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#continents-and-country-codes-supported-by-aws-route-53","title":"Continents and country codes supported by AWS Route 53","text":":Note: For more information please the official AWS documentation
To see all regions supported by AWS Route 53, please see the official (documentation)[https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html]
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#regions-supported-by-gcp-cloud-dns","title":"Regions supported by GCP CLoud DNS","text":"To see all regions supported by GCP Cloud DNS, please see the official (documentation)[https://cloud.google.com/compute/docs/regions-zones]
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/","title":"Simple Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#requirements","title":"Requirements","text":" - Complete the Multicluster Gateways Walkthrough where you'll have an environment configured with a Gateway that we'll use in this guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#overview","title":"Overview","text":"In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.$KUADRANT_ZONE_ROOT_DOMAIN
, where it exposes the endpoints GET /toys*
and POST /toys
, respectively, to mimic operations of reading and writing toy records.
We will rate limit the POST /toys
endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#1-deploy-the-toy-store-api","title":"\u2460 Deploy the Toy Store API","text":""},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#create-the-deployment","title":"Create the Deployment","text":"Note: You can skip this step and proceed to Create the HTTPRoute if you've already deployed the Toy Store API as part of the AuthPolicy for Application Developers and Platform Engineers guide.
Create the deployments for both clusters we've created previously (kind-mgc-workload-1
& kind-mgc-workload-2
).
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: toystore\n labels:\n app: toystore\nspec:\n selector:\n matchLabels:\n app: toystore\n template:\n metadata:\n labels:\n app: toystore\n spec:\n containers:\n\n - name: toystore\n image: quay.io/3scale/authorino:echo-api\n env:\n - name: PORT\n value: \"3000\"\n ports:\n - containerPort: 3000\n name: http\n replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: toystore\nspec:\n selector:\n app: toystore\n ports:\n - name: http\n port: 80\n protocol: TCP\n targetPort: 3000\nEOF\ndone\n
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#create-the-httproute","title":"Create the HTTPRoute","text":"Create a HTTPRoute to route traffic to the services via the Gateways:
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - toystore.$KUADRANT_ZONE_ROOT_DOMAIN\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\n - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\ndone\n
Verify the routes work:
curl -ik https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys\n# HTTP/1.1 200 OK\n
Given the two clusters, and our previously created DNSPolicy
, traffic will load balance between these clusters round-robin style. Load balancing here will be determined in part by DNS TTLs, so it can take a minute or two for requests to flow to both services.
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#2-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2461 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"create-toy\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n routeSelectors:\n - matches: # selects the 2nd HTTPRouteRule of the targeted route\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\nEOF\ndone\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
Verify the rate limiting works by sending requests in a loop.
Up to 5 successful (200 OK
) requests every 10 seconds to POST /toys
, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}' --silent -k --output /dev/null https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys -X POST | egrep --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK
) to GET /toys
:
while :; do curl --write-out '%{http_code}' --silent -k --output /dev/null https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys | egrep --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#next-steps","title":"Next Steps","text":"Here are some good, follow-on guides that build on this walkthrough:
- AuthPolicy for Application Developers and Platform Engineers
- Deploying/Configuring Metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/template/","title":"Title","text":""},{"location":"multicluster-gateway-controller/docs/how-to/template/#introduction","title":"Introduction","text":"blah blah amazing and wonderful feature blah blah gateway blah blah DNS
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#requirements","title":"Requirements","text":" - A computer
- Electricity
- Kind
- AWS Account
- Route 53 enabled
- Other Walkthroughs
## Installation and Setup
- Clone this repo locally
-
Setup a ./controller-config.env
file in the root of the repo with the following key values
# this sets up your default managed zone\nAWS_DNS_PUBLIC_ZONE_ID=<AWS ZONE ID>\n# this is the domain at the root of your zone (foo.example.com)\nZONE_ROOT_DOMAIN=<replace.this>\nLOG_LEVEL=1\n
-
Setup a ./aws-credentials.env
with credentials to access route 53
For example:
AWS_ACCESS_KEY_ID=<access_key_id>\nAWS_SECRET_ACCESS_KEY=<secret_access_key>\nAWS_REGION=eu-west-1\n
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#open-terminal-sessions","title":"Open terminal sessions","text":"For this walkthrough, we're going to use multiple terminal sessions/windows, all using multicluster-gateway-controller
as the pwd
.
Open three windows, which we'll refer to throughout this walkthrough as:
T1
(Hub Cluster) T2
(Where we'll run our controller locally) T3
(Workloads cluster)
To setup a local instance, in T1
, run:
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#known-bugs","title":"Known bugs","text":"buzzzzz
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#follow-on-walkthroughs","title":"Follow on Walkthroughs","text":"Some good follow on walkthroughs that build on this walkthrough
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#helpful-symbols-for-dev-use","title":"Helpful symbols (For dev use)","text":" - for more see https://gist.github.com/rxaviers/7360908
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/","title":"Setting up MGC in Existing OCM Clusters","text":"This guide will show you how to install and configure the Multi-Cluster Gateway Controller in pre-existing Open Cluster Management configured clusters.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#prerequisites","title":"Prerequisites","text":" - A hub cluster running the OCM control plane (>= v0.11.0 )
- Open cluster management addons enabled
clusteradm install hub-addon --names application-manager
- Any number of additional spoke clusters that have been configured as OCM ManagedClusters
- Kubectl (>= v1.14.0)
- Either a pre-existing cert-manager(>=v1.12.2) installation or the Kustomize and Helm CLIs installed
- Amazon Web services (AWS) and or Google cloud provider (GCP) credentials. See the DNS Provider guide for obtaining these credentials.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#configure-ocm-with-rawfeedbackjsonstring-feature-gate","title":"Configure OCM with RawFeedbackJsonString Feature Gate","text":"All OCM spoke clusters must be configured with the RawFeedbackJsonString
feature gate enabled.
Patch each spoke cluster's klusterlet
in an existing OCM install:
kubectl patch klusterlet klusterlet --type merge --patch '{\"spec\": {\"workConfiguration\": {\"featureGates\": [{\"feature\": \"RawFeedbackJsonString\", \"mode\": \"Enable\"}]}}}' --context <EACH_SPOKE_CLUSTER>\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#setup-for-hub-commands","title":"Setup for hub commands","text":"Many of the commands in this document should be run in the context of your hub cluster. By configure HUB_CLUSTER which will be used in the commands:
export HUB_CLUSTER=<HUB_CUSTER_NAME>\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#install-cert-manager","title":"Install Cert-Manager","text":"Cert-manager first needs to be installed on your hub cluster. If this has not previously been installed on the cluster, see the documentation for installation instructions here.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#installing-mgc","title":"Installing MGC","text":"First, run the following command in the context of your hub cluster to install the Gateway API CRDs:
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml --context $HUB_CLUSTER\n
Verify the CRDs have been established:
kubectl wait --timeout=5m crd/gatewayclasses.gateway.networking.k8s.io crd/gateways.gateway.networking.k8s.io crd/httproutes.gateway.networking.k8s.io --for=condition=Established --context $HUB_CLUSTER\n
customresourcedefinition.apiextensions.k8s.io/gatewayclasses.gateway.networking.k8s.io condition met\ncustomresourcedefinition.apiextensions.k8s.io/gateways.gateway.networking.k8s.io condition met\ncustomresourcedefinition.apiextensions.k8s.io/httproutes.gateway.networking.k8s.io condition met\n
Then run the following command to install the MGC:
kubectl apply -k \"github.com/kuadrant/multicluster-gateway-controller.git/config/mgc-install-guide?ref=release-0.2\" --context $HUB_CLUSTER\n
In addition to the MGC, this will also install the Kuadrant add-on manager and a GatewayClass
from which MGC-managed Gateways
can be instantiated.
Verify that the MGC and add-on manager have been installed and are running:
kubectl wait --timeout=5m -n multicluster-gateway-controller-system deployment/mgc-controller-manager --for=condition=Available --context $HUB_CLUSTER\n
deployment.apps/mgc-controller-manager condition met\n
Verify that the GatewayClass
has been accepted by the MGC:
kubectl wait --timeout=5m gatewayclass/kuadrant-multi-cluster-gateway-instance-per-cluster --for=condition=Accepted --context $HUB_CLUSTER\n
gatewayclass.gateway.networking.k8s.io/kuadrant-multi-cluster-gateway-instance-per-cluster condition met\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#creating-a-managedzone","title":"Creating a ManagedZone","text":"Note: To manage the creation of DNS records, MGC uses ManagedZone resources. A ManagedZone
can be configured to use DNS Zones on both AWS (Route53), and GCP (Cloud DNS). Commands to create each are provided below.
First, depending on the provider you would like to use export the environment variables detailed here in a terminal session.
Next, create a secret containing either the AWS or GCP credentials. We'll also create a namespace for your MGC configs:
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#aws","title":"AWS:","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: mgc-aws-credentials\n namespace: multi-cluster-gateways\ntype: \"kuadrant.io/aws\"\nstringData:\n AWS_ACCESS_KEY_ID: ${KUADRANT_AWS_ACCESS_KEY_ID}\n AWS_SECRET_ACCESS_KEY: ${KUADRANT_AWS_SECRET_ACCESS_KEY}\n AWS_REGION: ${KUADRANT_AWS_REGION}\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#gcp","title":"GCP","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: mgc-gcp-credentials\n namespace: multi-cluster-gateways\ntype: \"kuadrant.io/gcp\"\nstringData:\n GOOGLE: ${GOOGLE}\n PROJECT_ID: ${PROJECT_ID}\nEOF\n
Create a ManagedZone
using the commands below:
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#aws_1","title":"AWS:","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: mgc-dev-mz\n namespace: multi-cluster-gateways\nspec:\n id: ${KUADRANT_AWS_DNS_PUBLIC_ZONE_ID}\n domainName: ${KUADRANT_ZONE_ROOT_DOMAIN}\n description: \"Dev Managed Zone\"\n dnsProviderSecretRef:\n name: mgc-aws-credentials\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#gcp_1","title":"GCP","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: mgc-dev-mz\n namespace: multi-cluster-gateways\nspec:\n id: ${ZONE_NAME}\n domainName: ${ZONE_DNS_NAME}\n description: \"Dev Managed Zone\"\n dnsProviderSecretRef:\n name: mgc-gcp-credentials\nEOF\n
Verify that the ManagedZone
has been created and is in a ready state:
kubectl get managedzone -n multi-cluster-gateways --context $HUB_CLUSTER\n
NAME DOMAIN NAME ID RECORD COUNT NAMESERVERS READY\nmgc-dev-mz ef.hcpapps.net /hostedzone/Z06419551EM30QQYMZN7F 2 [\"ns-1547.awsdns-01.co.uk\",\"ns-533.awsdns-02.net\",\"ns-200.awsdns-25.com\",\"ns-1369.awsdns-43.org\"] True\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#creating-a-cert-issuer","title":"Creating a Cert Issuer","text":"Create a ClusterIssuer
to be used with cert-manager
. For simplicity, we will create a self-signed cert issuer here, but other issuers can also be configured.
cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: mgc-ca\n namespace: cert-manager\nspec:\n selfSigned: {}\nEOF\n
Verify that the ClusterIssuer
is ready:
kubectl wait --timeout=5m -n cert-manager clusterissuer/mgc-ca --for=condition=Ready --context $HUB_CLUSTER\n
clusterissuer.cert-manager.io/mgc-ca condition met\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#next-steps","title":"Next Steps","text":"Now that you have MGC installed and configured in your hub cluster, you can now continue with any of these follow-on guides:
- Installing the Kuadrant Service Protection components
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/","title":"Installing Kuadrant Service Protection into an existing OCM Managed Cluster","text":""},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#introduction","title":"Introduction","text":"This walkthrough will show you how to install and setup the Kuadrant Operator into an OCM Managed Cluster.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#prerequisites","title":"Prerequisites","text":" - Access to an Open Cluster Management(OCM) (>= v0.11.0) Managed Cluster, which has already been bootstrapped and registered with a hub cluster
- We have a guide which covers this in detail
- For more information on OCM also see:
- OCM quick start
- Managed cluster
- Kubectl (>= v1.14.0)
- OLM installed on the ManagedCluster where you want to run the Kuadrant Service Protection components
- For installation guides please see:
- Operator-sdk
- OLM
- Istio operator v1.20.0 installed on the spoke clusters
- Please see install guide here
- Gateway API v1
- To install please use:
kubectl apply -f \"https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml\"
- For more information please see: GatewayAPI DOCs
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#install-the-kuadrant-ocm-add-on","title":"Install the Kuadrant OCM Add-On","text":"To install the Kuadrant Service Protection components into a spoke ManagedCluster
, target your OCM Hub cluster with kubectl
and run:
kubectl apply -k \"github.com/kuadrant/multicluster-gateway-controller.git/config/service-protection-install-guide?ref=release-0.3\" -n namespace-of-your-managed-spoke-cluster-on-the-hub\n
The above command will install the ManagedClusterAddOn
resource needed to install the Kuadrant addon into the namespace representing a spoke cluster, and install the Kuadrant data-plane components into the open-cluster-management-agent-addon
namespace.
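You can confirm the ManagedClusterAddOn resource was created on the hub with a simple get (the namespace is the one representing your spoke cluster):
kubectl get managedclusteraddon kuadrant-addon -n namespace-of-your-managed-spoke-cluster-on-the-hub --context $HUB_CLUSTER\n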
The Kuadrant addon will install:
- Kuadrant Operator
- Limitador (and its associated operator)
- Authorino (and its associated operator)
For more details, see the Kuadrant components installed by the kuadrant-operator
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#olm-and-openshift-catalogsource","title":"OLM and OpenShift CatalogSource","text":"The Kuadrant OCM (Open Cluster Management) Add-On depends on the Operator Lifecycle Manager (OLM)'s CatalogSource
. By default, this is set to olm/operatorhubio-catalog
.
In OpenShift environments, OLM comes pre-installed. However, it is configured to use the openshift-marketplace/community-operators
CatalogSource by default, not the olm/operatorhubio-catalog
.
To align the Kuadrant add-on with the OpenShift default CatalogSource, you can patch the add-on's CatalogSource configuration. Run the following command (note it needs to be run for each managed cluster where the add-on is installed):
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"CatalogSource\":\"community-operators\", \"CatalogSourceNS\":\"openshift-marketplace\"}' -n managed-cluster-ns\n
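To confirm the annotation was applied as expected, you can inspect the add-on's annotations (a minimal check, assuming the same namespace as above):
kubectl get managedclusteraddon kuadrant-addon -n managed-cluster-ns -o jsonpath='{.metadata.annotations}'\n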
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#existing-istio-installations-and-changing-the-default-istio-operator-name","title":"Existing Istio installations and changing the default Istio Operator name","text":"In the case where you have an existing Istio installation on a cluster, you may encounter an issue where the Kuadrant Operator expects Istio's Operator to be named istiocontrolplane
.
The istioctl
command saves the IstioOperator CR that was used to install Istio in a copy of the CR named installed-state
.
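You can confirm the name of the existing IstioOperator CR before annotating (assuming Istio was installed into the istio-system namespace):
kubectl get istiooperator -n istio-system\n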
To let the Kuadrant operator use this existing installation, set the following:
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"IstioOperator\":\"installed-state\"}' -n <managed spoke cluster>
This will propagate down and update the Kuadrant Operator, used by the Kuadrant OCM Addon.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#changing-the-name-of-the-channel-to-use","title":"Changing the name of the channel to use","text":"If you want to use a different channel with the ManagedClusterAddon
to install the Kuadrant operator, you can do so by overriding the channel with the following annotation:
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"CatalogSourceNS\":\"openshift-marketplace\", \"CatalogSource\":\"community-operators\", \"Channel\":\"preview\"}' -n managed-cluster-ns
This will propagate down and update the Kuadrant Subscription, used by OLM in the spoke.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#verify-the-kuadrant-addon-installation","title":"Verify the Kuadrant addon installation","text":"To verify the Kuadrant OCM addon has installed currently, run:
kubectl wait --timeout=5m -n kuadrant-system kuadrant/kuadrant-sample --for=condition=Ready\n
You should see the namespace kuadrant-system
, and the following pods come up:
- authorino-value
- authorino-operator-value
- kuadrant-operator-controller-manager-value
- limitador-value
- limitador-operator-controller-manager-value
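To check, you can list the pods in that namespace (the value suffixes above stand in for generated pod-name suffixes, which will vary):
kubectl get pods -n kuadrant-system\n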
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#further-reading","title":"Further Reading","text":"With the Kuadrant data plane components installed, here is some further reading material to help you utilise Authorino and Limitador:
- Getting started with Authorino
- Getting started with Limitador
"},{"location":"multicluster-gateway-controller/docs/proposals/","title":"Index","text":""},{"location":"multicluster-gateway-controller/docs/proposals/#proposals","title":"Proposals","text":"This directory contains proposals accepted into the MGC. The template for add a proposal is located in this directory. Make a copy of the template and use it to define your own proposal.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/","title":"DNS Policy","text":""},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#problem","title":"Problem","text":"Gateway admins, need a way to define the DNS policy for a gateway distributed across multiple clusters in order to control how much and which traffic reaches these gateways. Ideally we would allow them to express a strategy that they want to use without needing to get into the details of each provider and needing to create and maintain dns record structure and individual records for all the different gateways that may be within their infrastructure.
Use Cases
As a gateway admin, I want to be able to reduce latency for my users by routing traffic based on the GEO location of the client. I want this strategy to automatically expand and adjust as my gateway topology grows and changes.
As a gateway admin, I have a discount with a particular cloud provider and want to send more of my traffic to the gateways hosted in that provider's infrastructure, and as I add more gateways I want that balance to remain constant and evolve to include my new gateways.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#goals","title":"Goals","text":" - Allow definition of a DNS load balancing strategy to decide how traffic should be weighted across multiple gateway instances from the central control plane.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#none-goals","title":"None Goals","text":" - Allow different DNS policies for different listeners. Although this may be something we look to support in the future, currently policy attachment does not allow for this type of targeting. This means a DNSPolicy is applied for the whole gateway currently.
- Define how health checks should work, this will be part of a separate proposal
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#terms","title":"Terms","text":" - managed listener: This is a listener with a host backed by a DNS zone managed by the multi-cluster gateway controller
- hub cluster: control plane cluster that manages 1 or more spokes
- spoke cluster: a cluster managed by the hub control plane cluster. This is where gateways are instantiated
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#proposal","title":"Proposal","text":"Provide a control plane DNSPolicy API that uses the idea of direct policy attachment from gateway API that allows a load balancing strategy to be applied to the DNS records structure for any managed listeners being served by the data plane instances of this gateway. The DNSPolicy also covers health checks that inform the DNS response but that is not covered in this document.
Below is a draft API for what we anticipate the DNSPolicy to look like
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n health:\n ...\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom: #optional\n\n - value: AWS #optional with both GEO and weighted. With GEO the custom weight is applied to gateways within a Geographic region\n weight: 10\n - value: GCP\n weight: 20\n GEO: #optional\n defaultGeo: IE # required with GEO. Chooses a default DNS response when no particular response is defined for a request from an unknown GEO.\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#available-load-balancing-strategies","title":"Available Load Balancing Strategies","text":"GEO and Weighted load balancing are well understood strategies and this API effectively allow a complex requirement to be expressed relatively simply and executed by the gateway controller in the chosen DNS provider. Our default policy will execute a \"Round Robin\" weighted strategy which reflects the current default behaviour.
With the above API we can provide weighted, GEO, and weighted-within-a-GEO load balancing. A weighted strategy, with at minimum a default weight, is always required and is the simplest type of policy. The multi-cluster gateway controller will set up a default policy when a gateway is discovered (shown below). This policy can be replaced or modified by the user. A weighted strategy can be complemented with a GEO strategy, i.e. they can be used together in order to provide GEO and weighted (within a GEO) load balancing. By defining a GEO section, you are indicating that you want to use a GEO based strategy (how this works is covered below).
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: default-policy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted: # required\n defaultWeight: 10 #required, all records created get this weight\n health:\n ... \n
In order to provide GEO based DNS and allow customisation of the weighting, we need some additional information to be provided by the gateway / cluster admin about where this gateway has been placed. For example if they want to use GEO based DNS as a strategy, we need to know what GEO identifier(s) to use for each record we create and a default GEO to use as a catch-all. Also, if the desired load balancing approach is to provide custom weighting and no longer simply use Round Robin, we will need a way to identify which records to apply that custom weighting to based on the clusters the gateway is placed on.
To solve this we will allow two new attributes to be added to the ManagedCluster
resource as labels:
kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
These two labels allow setting values in the DNSPolicy that will be reflected into DNS records for gateways placed on that cluster, depending on the strategies used (see the first DNSPolicy definition above for how these values are used, or take a look at the examples at the bottom).
example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\nspec: \n
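For illustration, the same labels could be applied to an already-registered cluster with kubectl (cluster name is hypothetical):
kubectl label managedcluster my-spoke-cluster kuadrant.io/lb-attribute-geo-code=IE kuadrant.io/lb-attribute-custom-weight=GCP --context $HUB_CLUSTER\n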
The attributes provide the key and value we need in order to understand how to define records for a given LB address based on the DNSPolicy targeting the gateway.
The kuadrant.io/lb-attribute-geo-code
attribute value is provider specific; using an invalid code will result in an error status condition in the DNSRecord resource.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#dns-record-structure","title":"DNS Record Structure","text":"This is an advanced topic and so is broken out into its own proposal doc DNS Record Structure
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#custom-weighting","title":"Custom Weighting","text":"Custom weighting will use the associated custom-weight
attribute set on the ManagedCluster
to decide which records should get a specific weight. The value of this attribute is up to the end user.
example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
The above is then used in the DNSPolicy to set custom weights for the records associated with the target gateway.
- value: GCP\n weight: 20\n
So any gateway targeted by a DNSPolicy with the above definition that is placed on a ManagedCluster
with the kuadrant.io/lb-attribute-custom-weight
set with a value of GCP will get an A record with a weight of 20
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#status","title":"Status","text":"DNSPolicy should have a ready condition that reflect that the DNSRecords have been created and configured as expected. In the case that there is an invalid policy, the status message should reflect this and indicate to the user that the old DNS has been preserved.
We will also want to add a status condition to the gateway status indicating it is affected by this policy. Gateway API recommends the following status condition:
- type: gateway.networking.k8s.io/PolicyAffected\n status: True \n message: \"DNSPolicy has been applied\"\n reason: PolicyApplied\n ...\n
https://github.com/kubernetes-sigs/gateway-api/pull/2128/files#diff-afe84021d0647e83f420f99f5d18b392abe5ec82d68f03156c7534de9f19a30aR888
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#example-policies","title":"Example Policies","text":""},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#round-robin-the-default-policy","title":"Round Robin (the default policy)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: RoundRobinPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#geo-round-robin","title":"GEO (Round Robin)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNS\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n GEO:\n defaultGeo: IE\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#custom","title":"Custom","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: SendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure #any record associated with a gateway on a cluster without this value gets the default\n weight: 30\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#geo-with-custom-weights","title":"GEO with Custom Weights","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNSAndSendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure\n weight: 30\n GEO:\n defaultGeo: IE\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#considerations-and-limitations","title":"Considerations and Limitations","text":"You cannot have a different load balancing strategy for each listener within a gateway. So in the following gateway definition
spec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n hostname: myapp.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS\n - allowedRoutes:\n namespaces:\n from: All\n hostname: other.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS \n
The DNS policy targeting this gateway will apply to both myapp.hcpapps.net and other.hcpapps.net.
However, there is still significant value even with this limitation, and it is something we will likely revisit in the future.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#background-docs","title":"Background Docs","text":"DNS Provider Support
AWS DNS
Google DNS
Azure DNS
Direct Policy Attachment
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/","title":"DNSRecordStructure","text":"DNSRecord is our API for expressing DNS endpoints via a kube CRD based API. It is managed by the multi-cluster gateway controller based on the desired state expressed in higher level APIs such as the Gateway or a DNSPolicy. In order to provide our feature set, we need to carefully consider how we structure our records and the types of records we need. This document proposes a particular structure based on the requirements and feature set we have.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#requirements","title":"Requirements","text":"We want to be able to support Gateway definitions that use the following listener definitions:
- wildcard:
*.example.com
and fully qualified listener host www.example.com
definitions with the notable exception of fully wildcarded ie *
as we cannot provide any DNS or TLS for something with no defined hostname. - listeners that have HTTPRoute defined on less than all the clusters where the listener is available. IE we don't want to send traffic to clusters where there is no HTTPRoute attached to the listener.
- Gateway instances that provide IPs that are deployed alongside instances on different infra that provide host names causing the addresses types on each of gateway instance to be different (IPAddress or HostAddress).
- We want to provide GEO based DNS as a feature of DNSPolicy and so our DNSRecord structure must support this.
- We want to offer default weighted and custom weighted DNS as part of DNSPolicy
- We want to allow root or apex domain to be used as listener hosts
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#diagram","title":"Diagram","text":"https://lucid.app/lucidchart/2f95c9c9-8ddf-4609-af37-48145c02ef7f/edit?viewport_loc=-188%2C-61%2C2400%2C1183%2C0_0&invitationId=inv_d5f35eb7-16a9-40ec-b568-38556de9b568
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#proposal","title":"Proposal","text":"For each listener defined in a gateway, we will create a set of records with the following rules.
none apex domain:
We will have a generated lb (load balancer) dns name that we will use as a CNAME for the listener hostname. This DNS name is not intended for use within a HTTPRoute but is instead just a DNS construct. This will allow us to set up additional CNAME records for that DNS name in the future that are returned based a GEO location. These DNS records will also be CNAMES pointing to specific gateway dns names, this will allow us to setup a weighted response. So the first layer CNAME handles balancing based on geo, the second layer handles balancing based on weighting.
shop.example.com\n | |\n (IE) (AUS)\n CNAME lb.shop.. lb.shop..\n | | | |\n (w 100) (w 200) (w 100) (w100)\n CNAME g1.lb.. g2.lb.. g3.lb.. g4.lb..\n A 192.. A 81.. CNAME aws.lb A 82..\n
When there is no geo strategy defined within the DNSPolicy, we will put everything into a default geo (IE a catch-all record) default.lb-{guid}.{listenerHost}
but set the routing policy to GEO allowing us to add more geo based records in the future if the gateway admin decides to move to a geo strategy as their needs grow.
To ensure this lb dns name is unique and does not clash we will use a short guid as part of the subdomain so lb-{guid}.{listenerHost}.
this guid will be based on the gateway name and gateway namespace in the control plane.
For a geo strategy we will add a geo record with a prefix to the lb subdomain based on the geo code. When there is no geo we will use default
as the prefix. {geo-code}.lb-{guid}.{listenerHost}
. Finally, for each gateway instance on a target cluster we will add a {spokeClusterName}.lb-{guid}.{listenerHost}
To allow for a mix of hostname and IP address types, we will always use a CNAME . So we will create a dns name for IPAddress with the following structure: {guid}.lb-{guid}.{listenerHost}
where the first guid will be based on the cluster name where the gateway is placed.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#apex-domains","title":"Apex Domains","text":"An apex domain is the domain at the apex or root of a zone. These are handled differently by DNS as they often have NS and SOA records. Generally it is not possible to set up a CNAME for apex domain (although some providers allow it).
If a listener is added to a gateway that is an apex domain, we can only add A records for that domain to keep ourselves compliant with as many providers as possible. If a listener is the apex domain, we will setup A records for that domain (favouring gateways with an IP address or resolving the IP behind a host) but there will be no special balancing/weighting done. Instead, we will expect that the owner of that will setup a HTTPRoute with a 301 permanent redirect sending users from the apex domain e.g. example.com to something like: www.example.com where the www subdomain based listener would use the rules of the none apex domains and be where advanced geo and weighted strategies are applied.
- gateway listener host name : example.com
- example.com A 81.17.241.20
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#geo-agnostic-everything-is-in-a-default-geo-catch-all","title":"Geo Agnostic (everything is in a default * geo catch all)","text":"This is the type of DNS Record structure that would back our default DNSPolicy.
-
gateway listener host name : www.example.com
DNSRecords:
- www.example.com CNAME lb-1ab1.www.example.com
- lb-1ab1.www.example.com CNAME geolocation * default.lb-1ab1.www.example.com
- default.lb-1ab1.www.example.com CNAME weighted 100 1bc1.lb-1ab1.www.example.com
- default.lb-1ab1.www.example.com CNAME weighted 100 aws.lb.com
- 1bc1.lb-1ab1.www.example.com A 192.22.2.1
So in the above example working up from the bottom, we have a mix of hostname and IP based addresses for the gateway instance. We have 2 evenly weighted records that balance between the two available gateways, then next we have the geo based record that is set to a default catch all as no geo has been specified then finally we have the actual listener hostname that points at our DNS based load balancer name.
DNSRecord Yaml
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: {gateway-name}-{listenerName}\n namespace: multi-cluster-gateways\nspec:\n dnsName: www.example.com\n managedZone:\n name: mgc-dev-mz\n endpoints:\n\n - dnsName: www.example.com\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-1ab1.www.example.com\n - dnsName: lb-1ab1.www.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: mygateway-multicluster-gateways\n providerSpecific:\n - name: \"geolocation-country-code\"\n value: \"*\"\n targets:\n - default.lb-1ab1.www.example.com\n - dnsName: default.lb-1ab1.www.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: cluster1\n providerSpecific:\n - name: \"weight\"\n value: \"100\"\n targets:\n - 1bc1.lb-1ab1.www.example.com\n - dnsName: default.lb-a1b2.shop.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: cluster2\n providerSpecific:\n - name: \"weight\"\n value: \"100\"\n targets:\n - aws.lb.com\n - dnsName: 1bc1.lb-1ab1.www.example.com\n recordTTL: 60\n recordType: A\n targets:\n - 192.22.2.1\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#geo-specific","title":"geo specific","text":"Once the end user selects to use a geo strategy via the DNSPolicy, we then need to restructure our DNS to add in our geo specific records. Here the default record
lb short code is {gw name + gw namespace} gw short code is {cluster name}
-
gateway listener host : shop.example.com
DNSRecords:
- shop.example.com CNAME lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com CNAME geolocation ireland ie.lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com geolocation australia aus.lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com geolocation default ie.lb-a1b2.shop.example.com (set by the default geo option)
- ie.lb-a1b2.shop.example.com CNAME weighted 100 ab1.lb-a1b2.shop.example.com
- ie.lb-a1b2.shop.example.com CNAME weighted 100 aws.lb.com
- aus.lb-a1b2.shop.example.com CNAME weighted 100 ab2.lb-a1b2.shop.example.com
- aus.lb-a1b2.shop.example.com CNAME weighted 100 ab3.lb-a1b2.shop.example.com
- ab1.lb-a1b2.shop.example.com A 192.22.2.1 192.22.2.5
- ab2.lb-a1b2.shop.example.com A 192.22.2.3
- ab3.lb-a1b2.shop.example.com A 192.22.2.4
In the above example we move from a default catch all to geo specific setup. Based on a DNSPolicy that specifies IE as the default geo location. We leave the default
subdomain in place to allow for clients that may still be using that and set up geo specific subdomains that allow us to route traffic based on its origin. In this example we are load balancing across 2 geos and 4 clusters
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#wildcards","title":"WildCards","text":"In the examples we have used fully qualified domain names, however sometimes it may be required to use a wildcard subdomain. example:
- gateway listener host : *.example.com
To support these we need to change the name of the DNSRecord away from the name of the listener as the k8s resource does not allow * in the name.
To do this we will set the dns record resource name to be a combination of {gateway-name}-{listenerName}
to keep a record of the host this is for we will set a top level property named dnsName
. You can see an example in the DNSRecord above.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#pros","title":"Pros","text":"This setup allows us a powerful set of features and flexibility
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#cons","title":"Cons","text":"With this CNAME based approach we are increasing the number of DNS lookups required to get to an IP which will increase the cost and add a small amount of latency. To counteract this, we will set a reasonably high TTL (at least 5 mins) for our CNAMES and (2 mins) for A records
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/","title":"Multiple DNS Provider Support","text":"Authors: Michael Nairn @mikenairn
Epic: https://github.com/Kuadrant/multicluster-gateway-controller/issues/189
Date: 25th May 2023
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#job-stories","title":"Job Stories","text":" - As a developer, I want to use MGC with a domain hosted in one of the major cloud DNS providers (Google Cloud DNS, Azure DNS or AWS Route53)
- As a developer, I want to use multiple domains with a single instance of MGC, each hosted on different cloud providers
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#goals","title":"Goals","text":" - Add ManagedZone and DNSRecord support for Google Cloud DNS
- Add ManagedZone and DNSRecord support for Azure DNS
- Add DNSRecord support for CoreDNS (Default for development environment)
- Update ManagedZone and DNSRecord support for AWS Route53
- Add support for multiple providers with a single instance of MGC
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#non-goals","title":"Non Goals","text":" - Support for every DNS provider
- Support for health checks
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#current-approach","title":"Current Approach","text":"Currently, MGC only supports AWS Route53 as a dns provider. A single instance of a DNSProvider resource is created per MGC instance which is configured with AWS config loaded from the environment. This provider is loaded into all controllers requiring dns access (ManagedZone and DNSRecord reconciliations), allowing a single instance of MGC to operate against a single account on a single dns provider.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#proposed-solution","title":"Proposed Solution","text":"MGC has three features it requires of any DNS provider in order to offer full support, DNSRecord management, Zone management and DNS Health checks. We do not however want to limit to providers that only offer this functionality, so to add support for a provider the minimum that provider should offer is API access to managed DNS records. MGC will continue to provide Zone management and DNS Health checks support on a per-provider basis.
Support will be added for AWS(Route53), Google(Google Cloud DNS), Azure and investigation into possible adding CoreDNS (intended for local dev purposes), with the following proposed initial support:
Provider DNS Records DNS Zones DNS Health AWS Route53 X X X Google Cloud DNS X X - AzureDNS X X - CoreDNS X - - Add DNSProvider as an API for MGC which contains all the required config for that particular provider including the credentials. This can be thought of in a similar way to a cert manager Issuer. Update ManagedZone to add a reference to a DNSProvider. This will be a required field on the ManagedZone and a DNSProvider must exist before a ManagedZone can be created. Update all controllers load the DNSProvider directly from the ManagedZone during reconciliation loops and remove the single controller wide instance. Add new provider implementations for google, azure and coredns.
* All providers constructors should accept a single struct containing all required config for that particular provider.\n* Providers must be configured from credentials passed in the config and not rely on environment variables.\n
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#other-solutions-investigated","title":"Other Solutions investigated","text":"Investigation was carried out into the suitability of [External DNS] (https://github.com/kubernetes-sigs/external-dns) as the sole means of managing dns resources. Unfortunately, while external dns does offer support for basic dns record management with a wide range of providers, there were too many features missing making it unsuitable at this time for integration.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#external-dns-as-a-separate-controller","title":"External DNS as a separate controller","text":"Run external dns, as intended, as a separate controller alongside mgc, and pass all responsibility for reconciling DNSRecord resources to it. All DNSRecord reconciliation is removed from MGC.
Issues:
- A single instance of external dns will only work with a single provider and a single set of credentials. As it is, in order to support more than a single provider, more than one external dns instance would need to be created, one for each provider/account pair.
- Geo and Weighted routing policies are not implemented for any provider other than AWS Route53.
- Only supports basic dns record management (A,CNAME, NS records etc ..), with no support for managed zones or health checks.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#external-dns-as-a-module-dependency","title":"External DNS as a module dependency","text":"Add external dns as a module dependency in order to make use of their DNS Providers, but continue to reconcile DNSRecords in MGC.
Issues:
- External DNS Providers all create clients using the current environment. Would require extensive refactoring in order to modify each provider to optionally be constructed using static credentials.
- Clients were all internal making it impossible, without modification, to use the upstream code to extend the provider behaviour to support additional functionality such as managed zone creation.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#checklist","title":"Checklist","text":" - [ ] An epic has been created and linked to
- [ ] Reviewers have been added. It is important that the right reviewers are selected.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/","title":"Provider agnostic DNS Health checks","text":""},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#introduction","title":"Introduction","text":"The MGC has the ability to extend the DNS configuration of the gateway with the DNSPolicy resource. This resource allows users to configure health checks. As a result of configuring health checks, the controller creates the health checks in Route53, attaching them to the related DNS records. This has the benefit of automatically disabling an endpoint if it becomes unhealthy, and enabling it again when it becomes healthy again.
This feature has a few shortfalls:
- It\u2019s tightly coupled with Route53. If other DNS providers are supported they must either provide a similar feature, or health checks will not be supported
- Lacks the ability to reach endpoints in private networks
- requires using the gateway controller to implement, maintain and test multiple providers
This document describes a proposal to extend the current health check implementation to overcome these shortfalls.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#goals","title":"Goals","text":" - Ability to configure health checks in the DNSPolicy associated to a Gateway
- DNS records are disabled when the associated health check fails
- Current status of the defined health checks is visible to the end user
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#nongoals","title":"Nongoals","text":" - Ability for the health checks to reach endpoints in separate private networks
- Transparently keep support for other health check providers like Route53
- Having health checks for wildcard listeners
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#use-cases","title":"Use-cases","text":" - As a gateway administrator, I would like to define a health check that each service sitting behind a particular listener across the production clusters has to implement to ensure we can automatically respond, failover and mitigate a failing instance of the service
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#proposal","title":"Proposal","text":"Currently, this functionality will be added to the existing MGC, and executed within that component. This will be created with the knowledge that it may need to be made into an external component in the future.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnspolicy-resource","title":"DNSPolicy
resource","text":"The presence of the healthCheck
means that for every DNS endpoint (that is either an A record, or a CNAME to an external host), a health check is created based on the health check configuration in the DNSPolicy.
A failureThreshold
field will be added to the health spec, allowing users to configure a number of consecutive health check failures that must be observed before the endpoint is considered unhealthy.
Example DNS Policy with a defined health check.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n healthCheck:\n endpoint: /health\n failureThreshold: 5\n port: 443\n protocol: https\n additionalHeaders: <SecretRef>\n expectedResponses:\n\n - 200\n - 301\n - 302\n - 407\n AllowInsecureCertificates: true\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: prod-web\n namespace: multi-cluster-gateways\n
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnshealthcheckprobe-resource","title":"DNSHealthCheckProbe
resource","text":"The DNSHealthCheckProbe resource configures a health probe in the controller to perform the health checks against an identified final A or CNAME endpoint. When created by the controller as a result of a DNS Policy, this will have an owner ref of the DNS Policy that caused it to be created.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSHealthCheckProbe\nmetadata:\n name: example-probe\nspec:\n port: \"...\"\n host: \u201c...\u201d\n address: \"...\"\n path: \"...\"\n protocol: \"...\"\n interval: \"...\"\n additionalHeaders: <SecretRef>\n expectedResponses:\n\n - 200\n 201\n 301\n AllowInsecureCertificate: true\nstatus:\n healthy: true\n consecutiveFailures: 0\n reason: \"\"\n lastCheck: \"...\"\n
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#spec-fields-definition","title":"Spec Fields Definition","text":" - Port The port to use
- Address The address to connect to (e.g. IP address or hostname of a clusters loadbalancer)
- Host The host to request in the Host header
- Path The path to request
- Protocol The protocol to use for this request
- Interval How frequently this check would ideally be executed.
- AdditionalHeaders Optional secret ref which contains k/v: headers and their values that can be specified to ensure the health check is successful.
- ExpectedResponses Optional HTTP response codes that should be considered healthy (defaults are 200 and 201).
- AllowInsecureCertificate Optional flag to allow using invalid (e.g. self-signed) certificates, default is false.
The reconciliation of this resource results in the configuration of a health probe, which targets the endpoint and updates the status. The status is propagated to the providerSpecific status of the equivalent endpoint in the DNSRecord
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#changes-to-current-controllers","title":"Changes to current controllers","text":"In order to support this new feature, the following changes in the behaviour of the controllers are proposed.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnspolicy-controller","title":"DNSPolicy controller","text":"Currently, the reconciliation loop of this controller creates health checks in the configured DNS provider (Route53 currently) based on the spec of the DNSPolicy, separately from the reconciliation of the DNSRecords. The proposed change is to reconcile health check probe CRs based on the combination of DNS Records and DNS Policies.
Instead of Route53 health checks, the controller will create DNSHealthCheckProbe
resources.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnsrecord-controller","title":"DNSRecord controller","text":"When reconciling a DNS Record, the DNS Record reconciler will retrieve the relevant DNSHealthCheckProbe CRs, and consult the status of them when determining what value to assign to a particular endpoint's weight.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dns-record-structure-diagram","title":"DNS Record Structure Diagram:","text":"https://lucid.app/lucidchart/2f95c9c9-8ddf-4609-af37-48145c02ef7f/edit?viewport_loc=-188%2C-61%2C2400%2C1183%2C0_0&invitationId=inv_d5f35eb7-16a9-40ec-b568-38556de9b568 How
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removing-unhealthy-endpoints","title":"Removing unhealthy Endpoints","text":"When a DNS health check probe is failing, it will update the DNS Record CR with a custom field on that endpoint to mark it as failing.
There are then 3 scenarios which we need to consider: 1 - All endpoints are healthy 2 - All endpoints are unhealthy 3 - Some endpoints are healthy and some are unhealthy.
In the cases 1 and 2, the result should be the same: All records are published to the DNS Provider.
When scenario 3 is encountered the following process should be followed:
For each gateway IP or CNAME: this should be omitted if unhealthy.\nFor each managed gateway CNAME: This should be omitted if all child records are unhealthy.\nFor each GEO CNAME: This should be omitted if all the managed gateway CNAMEs have been omitted.\nLoad balancer CNAME: This should never be omitted.\n
If we consider the DNS record to be a hierarchy of parents and children, then whenever any parent has no healthy children that parent is also considered unhealthy. No unhealthy elements are to be included in the DNS Record.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removal-process","title":"Removal Process","text":"When removing DNS records, we will want to avoid any NXDOMAIN
responses from the DNS service as this will cause the resolver to cache this missed domain for a while (30 minutes or more). The NXDOMAIN
response is triggered when the resolver attempts to resolve a host that does not have any records in the zone file.
The situation that would cause this to occur is when we have removed a record but still refer to it from other records.
As we wish to avoid any NXDOMAIN
responses from the nameserver - causing the resolver to cache this missed response we will need to ensure that any time a DNS Record (CNAME or A) is removed, we also remove any records that refer to the removed record. (e.g. when the gateway A record is removed, we will need to remove the managed gateway CNAME that refers to that A record).
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removal-example","title":"Removal Example","text":"Given the following DNS Records (simplified hosts used in example):
01 host.example.com. 300 IN CNAME lb.hcpapps.net.\n02 lb.hcpapps.net. 60 IN CNAME default-geo.hcpapps.net.\n03 default-geo.hcpapps.net. 120 IN CNAME cluster1.hcpapps.net.\n04 default-geo.hcpapps.net. 120 IN CNAME cluster2.hcpapps.net.\n05 cluster1.hcpapps.net. 300 IN CNAME cluster1-gw1.hcpapps.net.\n06 cluster1.hcpapps.net. 300 IN CNAME cluster1-gw2.hcpapps.net.\n07 cluster2.hcpapps.net. 300 IN CNAME cluster2-gw1.hcpapps.net.\n08 cluster2.hcpapps.net. 300 IN CNAME cluster2-gw2.hcpapps.net.\n09 cluster1-gw1.hcpapps.net. 60 IN CNAME cluster1-gw1.aws.com.\n10 cluster1-gw2.hcpapps.net. 60 IN CNAME cluster1-gw2.aws.com.\n11 cluster2-gw1.hcpapps.net. 60 IN CNAME cluster2-gw1.aws.com.\n12 cluster2-gw2.hcpapps.net. 60 IN CNAME cluster2-gw2.aws.com.\n
cases: - Record 09 becomes unhealthy: remove records 09 and 05.
- Record 09 and 10 become unhealthy: remove records 09, 10, 05, 06, 03
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#further-reading","title":"Further reading","text":"Domain Names RFC: https://datatracker.ietf.org/doc/html/rfc1034
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#executing-the-probes","title":"Executing the probes","text":"There will be a DNSHealthCheckProbe CR controller added to the controller. This controller will create an instance of a HealthMonitor
, the HealthMonitor ensures that each DNSHealthCheckProbe CR has a matching probeQueuer object running. It will also handle both the updating of the probeQueuer on CR update and the removal of probeQueuers, when a DNSHealthcheckProbe is removed.
The ProbeQueuer
will add a health check request to a queue based on a configured interval, this queue is consumed by a ProbeWorker
, probeQueuers work on their own goroutine.
The ProbeWorker is responsible for actually executing the probe, and updating the DNSHealthCheckProbe CR status. The probeWorker executes on its own goroutine.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/","title":"Proposal: Aggregation of Status Conditions","text":""},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#background","title":"Background","text":"Status conditions are used to represent the current state of a resource and provide information about any problems or issues that might be affecting it. They are defined as an array of Condition objects within the status section of a resource's YAML definition.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#problem-statement","title":"Problem Statement","text":"When multiple instances of a resource (e.g. a Gateway) are running across multiple clusters, it can be difficult to know the current state of each instance without checking each one individually. This can be time-consuming and error-prone, especially when there are a large number of clusters or resources.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#proposal","title":"Proposal","text":"To solve this problem, I'm proposing we leverage the status block in the control plane instance of that resource, aggregating the statuses to convey the necessary information.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#status-conditions","title":"Status Conditions","text":"For example, if the Ready
status condition type of a Gateway
is True
for all instances of the Gateway
resource across all clusters, then the Gateway
in the control plane will have the Ready
status condition type also set to True
.
status:\n conditions:\n\n - type: Ready\n status: True\n message: All listeners are valid\n
If the Ready
status condition type of some instances is not True
, the Ready
status condition type of the Gateway
in the control plane will be False
.
status:\n conditions:\n\n - type: Ready\n status: False\n
In addition, if the Ready
status condition type is False
, the Gateway
in the control plane should include a status message for each Gateway
instance where Ready
is False
. This message would indicate the reason why the condition is not true for each Gateway
.
status:\n conditions:\n\n - type: Ready\n status: False\n message: \"gateway-1 Listener certificate is expired; gateway-3 No listener configured for port 80\"\n
In this example, the Ready
status condition type is False
because two of the three Gateway instances (gateway-1 and gateway-3) have issues with their listeners. For gateway-1, the reason for the False
condition is that the listener certificate is expired, and for gateway-3, the reason is that no listener is configured for port 80. These reasons are included as status messages in the Gateway
resource in the control plane.
As there may be different reasons for the condition being False
across different clusters, it doesn't make sense to aggregate the reason
field. The reason
field is intended to be a programmatic identifier, while the message
field allows for a human readable message i.e. a semi-colon separated list of messages.
The lastTransitionTime
and observedGeneration
fields will behave as normal for the resource in the control plane.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#addresses-and-listeners-status","title":"Addresses and Listeners status","text":"The Gateway status can include information about addresses, like load balancer IP Addresses assigned to the Gateway, and listeners, such as the number of attached routes for each listener. This information is useful at the control plane level. For example, a DNS Record should only exist as long as there is at least 1 attached route for a listener. It can also be more complicated than that when it comes to multi cluster gateways. A DNS Record should only include the IP Addresses of the Gateway instances where the listener has at least 1 attached route. This is important when initial setup of DNS Records happen as applications start. It doesn't make sense to route traffic to a Gateway where a listener isn't ready/attached yet. It also comes into play when a Gateway is displaced either due to changing placement decision or removal.
In summary, the IP Addresses and number of attached routes per listener per Gateway instance is needed in the control plane to manage DNS effectively. This proposal adds that information the hub Gateway status block. This will ensure a decoupling of the DNS logic from the underlying resource/status syncing implementation (i.e. ManifestWork status feedback rules)
First, here are 2 instances of a multi cluster Gateway in 2 separate spoke clusters. The yaml is shortened to highlight the status block.
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n\n - type: IPAddress\n value: 172.31.200.0\n - type: IPAddress\n value: 172.31.201.0\n listeners:\n - attachedRoutes: 0\n conditions:\n name: api\n - attachedRoutes: 1\n conditions:\n name: web\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n - type: IPAddress\n value: 172.31.202.0\n - type: IPAddress\n value: 172.31.203.0\n listeners:\n - attachedRoutes: 1\n name: api\n - attachedRoutes: 1\n name: web\n
And here is the proposed status aggregation in the hub Gateway:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_1/172.31.200.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_1/172.31.201.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_2/172.31.202.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_2/172.31.203.0\n listeners:\n - attachedRoutes: 0\n name: cluster_1.api\n - attachedRoutes: 1\n name: cluster_1.web\n - attachedRoutes: 1\n name: cluster_2.api\n - attachedRoutes: 1\n name: cluster_2.web\n
The MultiCluster Gateway Controller will use a custom implementation of the addresses
and listenerers
fields. The address type
is of type AddressType, where the type is a domain-prefixed string identifier. The value can be split on the forward slash, /
, to give the cluster name and the underlying Gateway IPAddress value of type IPAddress. Both the IPAddress and Hostname types will be supported. The type strings for either will be kuadrant.io/MultiClusterIPAddress
and kuadrant.io/MultiClusterHostname
The listener name
is of type SectionName, with validation on allowed characters and max length of 253. The name can be split on the period, .
, to give the cluster name and the underlying listener name. As there are limits on the character length for the name
field, this puts a lower limit restriction on the cluster names and listener names used to ensure proper operation of this status aggregation. If the validation fails, a status condition showing a validation error should be included in the hub Gateway status block.
"},{"location":"multicluster-gateway-controller/docs/proposals/template/","title":"Proposal Template","text":"Authors: {authors names} Epic: {Issue of type epic this relates to} Date: {date proposed}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#job-stories","title":"Job Stories","text":"{ A bullet point list of stories this proposal solves}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#goals","title":"Goals","text":"{A bullet point list of the goals this will achieve}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#non-goals","title":"Non Goals","text":"{A bullet point list of goals that this will not achieve, IE scoping}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#current-approach","title":"Current Approach","text":"{outline the current approach if any}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#proposed-solution","title":"Proposed Solution","text":"{outline the proposed solution, links to diagrams and PRs can go here along with the details of your solution}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#testing","title":"Testing","text":"{outline any testing considerations. Does this need some form of load/performance test. Are there any considerations when thinking about an e2e test}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#checklist","title":"Checklist","text":" - [ ] An epic has been created and linked to
- [ ] Reviewers have been added. It is important that the right reviewers are selected.
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/aws/aws/","title":"AWS DNS","text":"AWS supports Weighted(Weighted Round Robin) and Geolocation routing policies https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html. Both of these can be configured directly on records in AWS route 53.
GEO Weighted
Weighted
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/azure/azure/","title":"Azure DNS","text":""},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/azure/azure/#azure","title":"Azure","text":"https://portal.azure.com/
Azure supports Weighted and Geolocation routing policies, but requires records to alias to a Traffic Manager resource that must also be created in the users account https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-routing-methods
Notes:
- A Traffic Manager Profile is created per record set and is created with a routing method (Weighted or Geographic) https://portal.azure.com/#view/Microsoft_Azure_Network/LoadBalancingHubMenuBlade/~/TrafficManagers
- Only a singe IP can be added to a DNSRecord set. A traffic manager profile must be created and aliased from a DNSRecord set for anything that involves more than a single target.
- Significantly more resources to manage in order to achieve functionality comparable with Google and AWS.
- The modelling of the records is significantly different from AWS Route53, but the current DNSRecord spec could still work. The Azure implementation will have to process the endpoint list and create Traffic Manager profiles as required to satisfy the record set.
Given the example DNSRecord here describing a record set for a geo location routing policy with four clusters, two in each of two regions (North America and Europe), the following Azure resources are required.
Three DNSRecords, each aliased to a different traffic manager:
- dnsrecord-geo-azure-hcpapps-net (dnsrecord-geo.azure.hcpapps.net) aliased to Traffic Manager Profile 1 (dnsrecord-geo-azure-hcpapps-net)
- dnsrecord-geo-na.azure-hcpapps-net (dnsrecord-geo.na.azure.hcpapps.net) aliased to Traffic Manager Profile 2 (dnsrecord-geo-na-azure-hcpapps-net)
- dnsrecord-geo-eu.azure-hcpapps-net (dnsrecord-geo.eu.azure.hcpapps.net) aliased to Traffic Manager Profile 3 (dnsrecord-geo-eu-azure-hcpapps-net)
Three Traffic Manager Profiles:
- Traffic Manager Profile 1 (dnsrecord-geo-azure-hcpapps-net): Geolocation routing policy with two region specific FQDN targets (dnsrecord-geo.eu.azure.hcpapps.net and dnsrecord-geo.na.azure.hcpapps.net).
- Traffic Manager Profile 2 (dnsrecord-geo-na-azure-hcpapps-net): Weighted routing policy with two IP address endpoints (172.31.0.1 and 172.31.0.2) with equal weighting.
- Traffic Manager Profile 3 (dnsrecord-geo-eu-azure-hcpapps-net): Weighted routing policy with two IP address endpoints (172.31.0.3 and 172.31.0.4) with equal weighting.
dig dnsrecord-geo.azure.hcpapps.net\n\n; <<>> DiG 9.18.12 <<>> dnsrecord-geo.azure.hcpapps.net\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 16236\n;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 65494\n;; QUESTION SECTION:\n;dnsrecord-geo.azure.hcpapps.net. IN A\n\n;; ANSWER SECTION:\ndnsrecord-geo.azure.hcpapps.net. 60 IN CNAME dnsrecord-geo-azure-hcpapps-net.trafficmanager.net.\ndnsrecord-geo-azure-hcpapps-net.trafficmanager.net. 60 IN CNAME dnsrecord-geo.eu.azure.hcpapps.net.\ndnsrecord-geo.eu.azure.hcpapps.net. 60 IN A 172.31.0.3\n\n;; Query time: 88 msec\n;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)\n;; WHEN: Tue May 30 15:05:07 IST 2023\n;; MSG SIZE rcvd: 168\n
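For comparison, creating one of the weighted profiles above with the Azure CLI could look like the following sketch (the resource group and endpoint names are illustrative):
# Weighted Traffic Manager profile for the North America record set
az network traffic-manager profile create \
  --name dnsrecord-geo-na-azure-hcpapps-net \
  --resource-group my-resource-group \
  --routing-method Weighted \
  --unique-dns-name dnsrecord-geo-na-azure-hcpapps-net

# One of the two equally weighted IP endpoints
az network traffic-manager endpoint create \
  --name cluster-1 \
  --profile-name dnsrecord-geo-na-azure-hcpapps-net \
  --resource-group my-resource-group \
  --type externalEndpoints \
  --target 172.31.0.1 \
  --weight 1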
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/google/google/","title":"Google DNS","text":""},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/google/google/#google","title":"Google","text":"https://console.cloud.google.com/net-services/dns/zones
Google supports Weighted (Weighted Round Robin) and Geolocation routing policies https://cloud.google.com/dns/docs/zones/manage-routing-policies. Both of these can be configured directly on records in Google Cloud DNS and no secondary Traffic Management resource is required.
Notes:
- Record sets are modelled as a single endpoint with routing policy embedded. This is a different approach to Route53 where each individual A/CNAME would have its own record entry.
- Weight must be an integer between 0 and 10000
- There are no continent options for region, only finer-grained regions such as us-east1, europe-west1, etc.
- There appears to be no way to set a default region; Google just routes requests to the nearest supported region.
- The current approach used in AWS Route53 for geo routing will work in the same way on Google DNS. A single CNAME record with geo routing policy specifying multiple geo specific A record entries as targets.
- Geo and weighted routing can be combined, as with AWS Route53, allowing traffic within a region to be routed using weightings.
- The modelling of the records is slightly different from AWS, but the current DNSRecord spec could still work. The Google implementation of AddRecords will have to process the list of endpoints, grouping related endpoints in order to build up the required API request. In this case there would not be a 1:1 mapping between an endpoint in a DNSRecord and the dns provider, but the DNSRecord contents would be kept consistent across all providers and compatibility with external-dns would be maintained.
Example request for Geo CNAME record:
POST https://dns.googleapis.com/dns/v1beta2/projects/it-cloud-gcp-rd-midd-san/managedZones/google-hcpapps-net/rrsets
{\n \"name\": \"dnsrecord-geo.google.hcpapps.net.\",\n \"routingPolicy\": {\n \"geo\": {\n \"item\": [\n {\n \"location\": \"us-east1\",\n \"rrdata\": [\n \"dnsrecord-geo.na.google.hcpapps.net.\"\n ]\n },\n {\n \"location\": \"europe-west1\",\n \"rrdata\": [\n \"dnsrecord-geo.eu.google.hcpapps.net.\"\n ]\n }\n ],\n \"enableFencing\": false\n }\n },\n \"ttl\": 60,\n \"type\": \"CNAME\"\n}\n
Example request for Weighted A record:
POST https://dns.googleapis.com/dns/v1beta2/projects/it-cloud-gcp-rd-midd-san/managedZones/google-hcpapps-net/rrsets
{\n \"name\": \"dnsrecord-geo.na.google.hcpapps.net.\",\n \"routingPolicy\": {\n \"wrr\": {\n \"item\": [\n {\n \"weight\": 60.0,\n \"rrdata\": [\n \"172.31.0.1\"\n ]\n },\n {\n \"weight\": 60.0,\n \"rrdata\": [\n \"172.31.0.2\"\n ]\n }\n ]\n }\n },\n \"ttl\": 60,\n \"type\": \"A\"\n}\n
Given the example DNSRecord here describing a record set for a geo location routing policy with four clusters, two in each of two regions (North America and Europe), the following resources are required.
Three DNSRecords, one CNAME (dnsrecord-geo.google.hcpapps.net) and two A records (dnsrecord-geo.na.google.hcpapps.net and dnsrecord-geo.eu.google.hcpapps.net)
dig dnsrecord-geo.google.hcpapps.net\n\n; <<>> DiG 9.18.12 <<>> dnsrecord-geo.google.hcpapps.net\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 22504\n;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 65494\n;; QUESTION SECTION:\n;dnsrecord-geo.google.hcpapps.net. IN A\n\n;; ANSWER SECTION:\ndnsrecord-geo.google.hcpapps.net. 60 IN CNAME dnsrecord-geo.eu.google.hcpapps.net.\ndnsrecord-geo.eu.google.hcpapps.net. 60 IN A 172.31.0.4\n\n;; Query time: 33 msec\n;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)\n;; WHEN: Tue May 30 15:05:25 IST 2023\n;; MSG SIZE rcvd: 108\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/","title":"Olm","text":""},{"location":"multicluster-gateway-controller/docs/versioning/olm/#how-to-create-a-mgc-olm-bundle-catalog-and-how-to-install-mgc-via-olm","title":"How to create a MGC OLM bundle, catalog and how to install MGC via OLM","text":" NOTE: You can supply different env vars to the following make commands these include:
* Version using the env var VERSION \n* Tag via the env var IMAGE_TAG for tags not following the semantic format.\n* Image registry via the env var REGISTRY\n* Registry org via the env var ORG\n\nFor example\n
make bundle-build-push VERSION=2.0.1
make catalog-build-push IMAGE_TAG=asdf
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#creating-the-bundle","title":"Creating the bundle","text":" - Generate build and push the OLM bundle manifests for MGC, run the following make target:
make bundle-build-push\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#creating-the-catalog","title":"Creating the catalog","text":" - Build and push the catalog image
make catalog-build-push\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#installing-the-operator-via-olm-catalog","title":"Installing the operator via OLM catalog","text":" -
Create a namespace:
cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways-system\nEOF\n
-
Create a catalog source:
cat <<EOF | kubectl apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: mgc-catalog
  namespace: olm
spec:
  sourceType: grpc
  image: quay.io/kuadrant/multicluster-gateway-controller-catalog:v6.5.4
  grpcPodConfig:
    securityContextConfig: restricted
  displayName: mgc-catalog
  publisher: Red Hat
EOF
-
Create a subscription:
cat <<EOF | kubectl apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: multicluster-gateway-controller
  namespace: multi-cluster-gateways-system
spec:
  channel: alpha
  name: multicluster-gateway-controller
  source: mgc-catalog
  sourceNamespace: olm
  installPlanApproval: Automatic
EOF
- Create an operator group:
cat <<EOF | kubectl apply -f -
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: og-mgc
  namespace: multi-cluster-gateways-system
EOF
For more information on each of these OLM resources please see the official docs
"},{"location":"architecture/docs/design/architectural-overview-v1/","title":"Kuadrant Architectural Overview","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#overview","title":"Overview","text":"Kuadrant provides connectivity, security and service protection capabilities in both a single and multi-cluster environment. It exposes these capabilities in the form of Kubernetes CRDs that implement the Gateway API concept of policy attachment. These policy APIs can target specific Gateway API resources such as Gateways
and HTTPRoutes
to extend their capabilities and configuration. They enable platform engineers to secure, protect and connect their infrastructure and allow application developers to self-serve and refine policies to their specific needs in order to protect exposed endpoints.
"},{"location":"architecture/docs/design/architectural-overview-v1/#key-architectural-areas","title":"Key Architectural Areas","text":" - Kuadrant architecture is defined and implemented with both control plane and data plane components.
- The control plane is where policies are exposed and expressed as Kubernetes APIs and reconciled by a policy controller.
- The data plane is where Kuadrant's \"policy enforcement\" components exist. These components are configured by the control plane and integrate either directly with the Gateway provider or via external integrations.
"},{"location":"architecture/docs/design/architectural-overview-v1/#10000m-architecture","title":"10000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"The control plane is a set of controllers and operators that are responsible for for installation and configuration of other components such as the data plane enforcement components and configuration of the Gateway to enable the data plane components to interact with incoming requests. The control plane also owns and reconciles the policy CRD APIs into more complex and specific configuration objects that the policy enforcement components consume in order to know the rules to apply to incoming requests or the configuration to apply to external integrations such as DNS and ACME providers.
"},{"location":"architecture/docs/design/architectural-overview-v1/#kuadrant-operator","title":"Kuadrant Operator","text":" - Installation and configuration of other control plane components
- Installation of data plane policy enforcement components via their respective control plane operators
- Configures the Gateway via WASM plugin and other APIs to leverage the data plane components for auth and rate limiting on incoming requests.
- Exposes
RateLimitPolicy
, AuthPolicy
, DNSPolicy
and TLSPolicy
and reconciles these into enforceable configuration for the data plane. - Exposes
Kuadrant
and reconciles this to configure and trigger installation of the required data plane components and other control plane components.
"},{"location":"architecture/docs/design/architectural-overview-v1/#limitador-operator","title":"Limitador Operator:","text":" - Installs and configures the Limitador data plane component based on the Limitador CR. Limits specified in the limitador CR are mountd via configmap into the limitador component.
"},{"location":"architecture/docs/design/architectural-overview-v1/#authorino-operator","title":"Authorino Operator:","text":" - Installs and configures the Authorino data plane component based on the Authorino CR.
"},{"location":"architecture/docs/design/architectural-overview-v1/#cert-manager","title":"Cert-Manager:","text":" - Manages TLS certificates for our components and for the Gateways. Consumes Certificate resources created by Kuadrant operator in response to the TLSPolicy.
"},{"location":"architecture/docs/design/architectural-overview-v1/#dns-operator","title":"DNS Operator","text":" - DNS operator consumes DNSRecord resources that are configured via the DNSPolicy api and applies them into the targeted cloud DNS provider AWS, Azure and Google DNS are our main targets
"},{"location":"architecture/docs/design/architectural-overview-v1/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"The data plane components sit in the request flow and are responsible for enforcing configuration defined by policy and providing service protection capabilities based on configuration managed and created by the control plane.
"},{"location":"architecture/docs/design/architectural-overview-v1/#limitador","title":"Limitador","text":" - Complies with the with Envoy rate limiting API to provide rate limiting to the gateway. Consumes limits from a configmap created based on the RateLimitPolicy API.
"},{"location":"architecture/docs/design/architectural-overview-v1/#authorino","title":"Authorino","text":" - Complies with the Envoy external auth API to provide auth integration to the gateway. It provides both Authn and Authz. Consumes AuthConfigs created by the kuadrant operator based on the defined
AuthPolicy
API.
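For context, below is a minimal, hand-written AuthConfig of the kind Authorino consumes (in practice the Kuadrant operator generates these from an AuthPolicy); the host and label selector are illustrative and the authorino.kuadrant.io/v1beta2 schema is assumed:
apiVersion: authorino.kuadrant.io/v1beta2
kind: AuthConfig
metadata:
  name: toystore-protection
spec:
  hosts:
    - api.toystore.acme.com
  authentication:
    "api-key-users":
      apiKey:
        selector:
          matchLabels:
            app: toystore          # matches the Secrets holding the API keys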
"},{"location":"architecture/docs/design/architectural-overview-v1/#wasm-shim","title":"WASM Shim","text":" - Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request time enforcement of and rate limiting.
"},{"location":"architecture/docs/design/architectural-overview-v1/#single-cluster-layout","title":"Single Cluster Layout","text":"In a single cluster, you have the Kuadrant control plane and data plane sitting together. It is configured to integrate with Gateways on the same cluster and configure a DNS zone via a DNS provider secret (configured alongside a DNSPolicy). Storage of rate limit counters is possible but not required as they are not being shared.
"},{"location":"architecture/docs/design/architectural-overview-v1/#multi-cluster","title":"Multi-Cluster","text":"In the default multi-cluster setup. Each individual cluster has Kuadrant installed. Each of these clusters are unaware of the other. They are effectively operating as single clusters. The multi-cluster aspect is created by sharing access with the DNS zone, using a shared host across the clusters and leveraging shared counter storage. The zone is operated on independently by each of DNS operator on both clusters to form a single cohesive record set. More details on this can be found in the following RFC document: TODO add link. The rate limit counters can also be shared and used by different clusters in order to provide global rate limiting. This is achieved by connecting each instance of Limitador to a shared data store that uses the Redis protocol. There is another option available for achieving multi-cluster connectivity (see intgrations below) that requires the use of a \"hub\" cluster and integration with OCM (open cluster management).
Shown above is a multi-cluster, multi-ingress-gateway topology. This might be used to support a geographically distributed system, for example. However, it is also possible to leverage overlay networking tools such as Skupper that integrate at the Kubernetes service level to have a single gateway cluster that then integrates with multiple backends (on different clusters or in custom infrastructure).
"},{"location":"architecture/docs/design/architectural-overview-v1/#dependencies","title":"Dependencies","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#istio-required","title":"Istio: Required","text":" - Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities. Kuadrant configures Envoy via the Istio control plane in order to enforce the applied policies and register components such as Authorino and Limitador.
- Used by
RateLimitPolicy
and AuthPolicy
"},{"location":"architecture/docs/design/architectural-overview-v1/#gateway-api-required","title":"Gateway API: Required","text":" - New standard for Ingress from the Kubernetes community
- Gateway API is the core API that Kuadrant integrates with.
"},{"location":"architecture/docs/design/architectural-overview-v1/#integrations","title":"Integrations","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#open-cluster-manager-optional","title":"Open Cluster Manager: Optional","text":" - Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
While the default setup is to leverage a distributed configuration for DNS and rate limiting, there is also a component that offers experimental integration with Open Cluster Management.
In this setup, the OCM integration controller is installed into the hub alongside the DNS Operator and cert-manager. This integration allows you to define gateways in the hub and distribute them to \"spoke\" clusters. The addresses of these gateways are gathered from the spokes and aggregated back to the hub. The Kuadrant operator and DNS operator then act on this information as though it were a single cluster gateway with multiple addresses. The DNS zone in the configured DNS provider is managed centrally by one DNS operator instance.
"},{"location":"architecture/docs/design/architectural-overview/","title":"Kuadrant Architectural Overview [Draft]","text":""},{"location":"architecture/docs/design/architectural-overview/#overview","title":"Overview","text":"It is important to note that Kuadrant is not in itself a gateway provider. Kuadrant provides a set of valuable policy APIs that enhance Gateway API via its defined policy attachment extension point. The policy APIs are reconciled by a set of policy controllers and enforced via integration at different points to configure, enhance and secure the application connectivity provided via Gateway API and the underlying gateway provider. These policy extensions are focused around areas such as DNS management supporting global load balancing and health checks, alongside service protection specific APIs such as rate limiting and auth. Kuadrant also integrates with Open Cluster Management as a multi-cluster control plane to enable defining and distributing Gateways across multiple clusters, providing load balancing and tls management for these distributed gateways. These integrations and features can be managed centrally in a declarative way from the Open Cluster Management Hub using Kubernetes resources.
"},{"location":"architecture/docs/design/architectural-overview/#key-architectural-areas","title":"Key Architectural Areas","text":" - The Kuadrant architecture is spread across a control plane and also a data plane. Kuadrant can work in both a single and multi-cluster context. Currently in order for all APIs to work in a single or multi-cluster context you need to have Open Cluster Management installed. While this may change in the future, this approach allows us to start with a single cluster and seamlessly scale as more clusters are added.
- The control plane is where policies are exposed and expressed as kubernetes APIs and reconciled by the Kuadrant policy controllers.
- The data plane is where Kuadrant's service protection components, configured by the control plane policies, are enforced within the gateway instance as part of the request flow.
"},{"location":"architecture/docs/design/architectural-overview/#1000m-architecture","title":"1000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"A control plane component is something responsible for accepting instruction via a CRD based API and ensuring that configuration is manifested into state that can be acted on.
"},{"location":"architecture/docs/design/architectural-overview/#kuadrant-operator","title":"Kuadrant Operator","text":" - Installation of data plane service protection components via their respective operators
- Exposes
RateLimitPolicy
and AuthPolicy
and is currently the policy controller for these APIs - Configures the Gateway to be able to leverage the data plane service protection components
"},{"location":"architecture/docs/design/architectural-overview/#multi-cluster-gateway-controller","title":"Multi-Cluster Gateway Controller","text":" - Exposes
DNSPolicy
and TLSPolicy
- Configures DNS providers (e.g AWS Route 53) and TLS providers
- Focused around use cases involving distributed gateways (for example across clouds or geographic regions)
- Integrates with Open Cluster Management as the multi-cluster management hub to distribute and observe gateway status based on the clusters they are deployed to. Works directly with Open Cluster Management APIs such as
PlacementDecision
and ManifestWork
.
"},{"location":"architecture/docs/design/architectural-overview/#kuadrant-add-on-manager","title":"Kuadrant-add-on-manager","text":" - Sub component in the gateway controller repository
- Follows the add-on pattern from Open Cluster Management
- Responsible for configuring and installing Kuadrant into a target spoke cluster
"},{"location":"architecture/docs/design/architectural-overview/#limitador-operator","title":"Limitador Operator:","text":" - Installs and configures Limitador
"},{"location":"architecture/docs/design/architectural-overview/#authorino-operator","title":"Authorino Operator:","text":" - Installs and configures Authorino
"},{"location":"architecture/docs/design/architectural-overview/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"A data plane component sits in the request flow and is responsible for enforcing policy and providing service protection capabilities based on configuration managed and created by the control plane.
"},{"location":"architecture/docs/design/architectural-overview/#limitador","title":"Limitador","text":" - Complies with the with Envoy rate limiting API to provide rate limiting to the gateway
"},{"location":"architecture/docs/design/architectural-overview/#authorino","title":"Authorino","text":" - Complies with the Envoy external auth API to provide auth integration to the gateway
"},{"location":"architecture/docs/design/architectural-overview/#wasm-shim","title":"WASM Shim","text":" - Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request time enforcement of and rate limiting
"},{"location":"architecture/docs/design/architectural-overview/#dependencies-and-integrations","title":"Dependencies and integrations","text":"In order to provide its full suite of functionality, Kuadrant has several dependencies. Some of these are optional depending on the functionality needed.
"},{"location":"architecture/docs/design/architectural-overview/#cert-manager-required","title":"Cert-Manager: Required","text":" - Provides TLS integration
- Used by
TLSPolicy
and Authorino.
"},{"location":"architecture/docs/design/architectural-overview/#open-cluster-manager-required","title":"Open Cluster Manager: Required","text":" - Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
"},{"location":"architecture/docs/design/architectural-overview/#istio-required","title":"Istio: Required","text":" - Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities.
- Used by
RateLimitPolicy
and AuthPolicy
"},{"location":"architecture/docs/design/architectural-overview/#gateway-api-required","title":"Gateway API: Required","text":" - New standard for Ingress from the Kubernetes community
- Gateway API is the core API that Kuadrant integrates with.
"},{"location":"architecture/docs/design/architectural-overview/#thanosprometheusgrafana-optional","title":"Thanos/Prometheus/Grafana: Optional","text":" - Provides observability integration
- Rather than providing any Kuadrant specific observability tooling, we instead look to leverage existing tools and technologies to provide observability capabilities for ingress.
"},{"location":"architecture/docs/design/architectural-overview/#high-level-multi-cluster-architecture","title":"High Level Multi-Cluster Architecture","text":"Kuadrant has a multi-cluster gateway controller that is intended to run in a Open Cluster Management provided \"Hub\" cluster. This cluster is effectively a central management cluster where policy and gateways along with all that Open Cluster Management offers can be defined and distributed to the managed \"spoke\" clusters.
"},{"location":"architecture/docs/design/architectural-overview/#single-cluster","title":"Single cluster","text":"In a single cluster context, the overall architecture remains the same as above, the key difference is that the Hub and Spoke cluster are now a single cluster rather than multiple clusters. This is how we are initially supporting single cluster.
"},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-leverage-open-cluster-management","title":"How does Kuadrant leverage Open Cluster Management?","text":"Kuadrant deploys a multi-cluster gateway controller into the Open Cluster Management hub (a control plane that manages a set of \"spoke\" clusters where workloads are executed). This controller offers its own APIs but also integrates with hub CRD based APIs (such as the placement API) along with the Gateway API CRD based APIs in order to provide multi-cluster Gateway capabilities to the hub and distribute actual gateway instances to the spokes. See the Open Cluster Management docs for further details on the hub spoke architecture.
As part of installing Kuadrant, the Gateway API CRDs are also installed into the hub cluster and Kuadrant defines a standard Gateway API GatewayClass
resource that the multi-cluster gateway controller is the chosen controller for.
Once installed, an Open Cluster Management user can then (with the correct RBAC in place) define in the standard way a Gateway resource that inherits from the Kuadrant configured GatewayClass
in the hub. There is nothing unique about this Gateway definition; the difference is what it represents and how it is used. This Gateway is used to represent a \"multi-cluster\" distributed gateway. As such, there are no pods running behind this Gateway instance in the hub cluster; instead it serves as a template that the Kuadrant multi-cluster gateway controller reconciles and distributes to targeted spoke clusters. It leverages the Open Cluster Management APIs to distribute these gateways (more info below) and aggregates the status information from each spoke cluster instance of this gateway back to this central definition. In doing this, it can represent the status of the gateway across multiple clusters and also use that information to integrate with DNS providers etc.
"},{"location":"architecture/docs/design/architectural-overview/#gateway-deployment-and-distribution","title":"Gateway Deployment and Distribution","text":"In order for a multi-cluster gateway to be truly useful, it needs to be distributed or \"placed\" on a specific set of hub managed spoke clusters. Open Cluster Management is responsible for a set of placement and replication APIs. Kuadrant is aware of these APIs, and so when a given gateway is chosen to be placed on a set of managed clusters, Kuadrant multi-cluster gateway controller will ensure the right resources (ManifestWork
) are created in the correct namespaces in the hub. Open Cluster Management then is responsible for syncing these to the actual spoke cluster and reporting back the status of these resources to the Hub. A user would indicate which clusters they want a gateway placed on by using a Placement
and then labeling the gateway using the cluster.open-cluster-management.io/placement
label.
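Putting the two together, a sketch of placing a gateway on two clusters; the names, namespace and gateway class are illustrative:
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
  name: http-gateway-placement
  namespace: multi-cluster-gateways
spec:
  numberOfClusters: 2
---
apiVersion: gateway.networking.k8s.io/v1beta1
kind: Gateway
metadata:
  name: prod-web
  namespace: multi-cluster-gateways
  labels:
    cluster.open-cluster-management.io/placement: http-gateway-placement
spec:
  gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster  # illustrative class name
  listeners:
    - name: api
      hostname: "*.apps.hcpapps.net"
      port: 443
      protocol: HTTPS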
In order for the Gateway to be instantiated, we need to know what underlying gateway provider is being used on the spoke clusters. Admins can then set this provider in the hub via the GatewayClass params. In the hub, Kuadrant will then apply a transformation to the gateway to ensure that, when synced, it references this spoke gateway provider (Istio for example).
It is the Open Cluster Management work agent that is responsible for syncing down and applying the resources into the managed spoke cluster. It is also responsible for syncing status information back to the hub. It is the multi-cluster gateway controller that is responsible for aggregating this status.
The status information reported back to the Hub is used by the multi-cluster gateway controller to know what LB hosts / IPAddresses to use for DNSRecords that it creates and manages.
More info on the Open Cluster Management hub and spoke architecture can be found here
"},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-integrate-with-gateway-providers","title":"How does Kuadrant integrate with Gateway Providers?","text":"Currently the Kuadrant data plane only integrates with an Istio based gateway provider:
- It registers Authorino with the
IstioOperator
as an auth provider so that Authorino can be used as an external auth provider. - It leverages an
EnvoyFilter
to register the rate limiting service as an upstream service. - Based on the Kuadrant
AuthPolicy
, it leverages Istio's AuthorizationPolicy
resource to configure when a request should trigger Authorino to be called for a given host, path and method etc. - It provides a WebAssembly (WASM) Plugin that conforms to the Proxy WASM ABI (application binary interface). This WASM Plugin is loaded into the underlying Envoy based gateway provider and configured via the Kuadrant Operator based on defined
RateLimitPolicy
resources. This binary is executed in response to an HTTP request being accepted by the gateway via the underlying Envoy instance that provides the proxy layer for the Gateway. This plugin is configured with the correct upstream rate limit service name and, when it sees a request, based on the provided configuration, it will trigger a call to the installed Limitador that provides the rate limit capabilities and either allow the request to continue or trigger a response to the client with a 429 (too many requests) HTTP code.
"},{"location":"architecture/docs/design/architectural-overview/#data-flows","title":"Data Flows","text":"There are several different data flows when using Kuadrant.
"},{"location":"architecture/docs/design/architectural-overview/#control-plane-configuration-and-status-reporting","title":"Control plane configuration and status reporting","text":"The initial creation of these APIs (gateways, policies etc) is done by the relevant persona in the control plane just as they would any other k8s resource. We use the term cluster admin or gateway admin as the operations type persona configuring, and placing gateways. As shown above, in a multi-cluster configuration. API definitions are pulled from the Hub and \"manifested\" into the spokes. The Status of those synced resources are reported back to the Hub. The same happens for a single cluster, the only difference being the work agent hub controllers are all installed on one cluster.
"},{"location":"architecture/docs/design/architectural-overview/#third-party-enforcement-and-integration","title":"Third party enforcement and Integration","text":"In order to enforce the policy configuration, components in the control plane and data plane can reach out to configured 3rd parties such as cloud based DNS provider, TLS providers and Auth providers.
"},{"location":"architecture/docs/design/architectural-overview/#request-flow","title":"Request Flow","text":"Requests coming through the gateway instance can be sent to Limitador based on configuration of the WASM plugin installed into the Envoy based gateway provider or to Authorino based configuration provided by the Istio AuthorizationPolicy
. Each of these components has the capability to see the request, and needs to in order to make the required decision. Each of these components can also prevent the request from reaching its intended backend destination based on user configuration.
"},{"location":"architecture/docs/design/architectural-overview/#auth","title":"Auth","text":"As all of the APIs are CRDs, auth around creating these resources is handled in the standard way IE by the kubernetes cluster and RBAC. There is no relationship by default between the Auth features provided by Authorino to application developers and the auth requirements of the cluster API server.
For Auth between Spoke and Hub see Open Cluster Management docs
"},{"location":"architecture/docs/design/architectural-overview/#observability","title":"Observability","text":"Kuadrant doesn't provide any specific observability components, but rather provides a reference setup using well known and established components along with some useful dashboards to help observe key things around the Gateways. The focus of this setup, is in the context of a multi-cluster setup where Open Cluster Management is installed and gateways are being defined and distributed from that hub.
"},{"location":"architecture/docs/design/architectural-overview/#some-notes-on-future-direction","title":"Some notes on future direction","text":"This section is here to provide some insight into architectural changes that may be seen in the near future:
What is in this doc represents the architecture at the point of our MVP release. Below are some areas that we have identified that are likely to change in the coming releases. As these happen, this doc will also evolve.
- We want to separate out the OCM integration into its own controller so that policies can evolve without a coupling to any one multi-cluster management solution
- We want to separate the policies into their own controller that is capable of supporting both single (without Open Cluster Management) and multi-cluster (with Open Cluster Management enabled) contexts, so that the barrier to entry is reduced for those starting with a single cluster
- We want to allow for an on cluster DNS Provider such as CoreDNS so that we can provide an implementation that is disconnected from any cloud provider and provides more flexible DNS setups.
- We will look to reduce our integration with Istio and want to provide integration with additional gateway providers such as EnvoyGateway
"},{"location":"architecture/docs/design/modular_installation/","title":"Kuadrant Proposal - Modular Installation","text":"Kuadrant is developing a set of loosely coupled functionalities built directly on top of Kubernetes. Kuadrant aims to allow customers to just install, use and understand those functionalities they need.
"},{"location":"architecture/docs/design/modular_installation/#problem-statement","title":"Problem Statement","text":"Currently, the installation tool of kuadrant, the kuadrantctl CLI, installs all or nothing. Installing more than the customer needs adds unneeded complexity and operational effort. For example, if a customer is looking for rate limiting and not interested in authentication functionality, then the customer should be able to just install and run that part of Kuadrant.
"},{"location":"architecture/docs/design/modular_installation/#high-level-goals","title":"High Level Goals","text":" - Install only required components. Operate only required components.
Reduce system complexity and operational effort to the minimum required. Components in this context make reference to deployments and running instances.
- Expose only the activated functionalities
A user of a partial Kuadrant install should not be confronted with data in custom resources that has no meaning or is not accessible in their partial Kuadrant install. The design of the kuadrant API should take this goal into account.
"},{"location":"architecture/docs/design/modular_installation/#proposed-solution","title":"Proposed Solution","text":"The kuadrant installation mechanism should offer modular installation to enable/disable loosely coupled pieces of kuadrant. Modular installation options should be feature oriented rather than deployment component oriented. Then, it is up to the installation tool to decide what components need to be deployed and how to configure it.
Each feature, or part of it, is eligible to be included or excluded when installing kuadrant.
Some profiles can be defined to group set of commonly required features. Naming the profiles allows the customer to easily express wanted installation configuration. Furthermore, profiles not only can be used to group a set of features, profiles can be used to define deployment options.
- Minimal: Minimal installation required to run an API without any protection, analytics or API management. Default deployment option.
- AuthZ: Authentication and authorization mechanisms activated.
- RateLimit: Basic rate limit (only pre-auth rate limit) features.
- Full: Full featured kuadrant installation.

A kuadrant operator, together with a design of a kuadrant CRD, is desired: not only for kuadrant installation, but also for lifecycle management. Additionally, the kuadrantctl CLI tool can also be useful to either deploy kuadrant components and manifests or just deploy the kuadrant operator.
The kuadrant control plane should be aware of the installed profile via env vars or command line params in the control plane running components. With that information, the control plane can decide to enable or disable CRD watching, label and annotation monitoring and ultimately reject any configuration object that relies on disabled functionality. The least a customer can expect from kuadrant is to be consistent and reject any functionality request that it cannot provide.
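Purely as a hypothetical illustration of how profile-oriented installation might surface in the CLI (neither the --profile flag nor this exact invocation exists today; the profile name mirrors the list above):
# Hypothetical: install and operate only the rate limiting feature set
kuadrantctl install --profile RateLimit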
"},{"location":"architecture/rfcs/0001-rlp-v2/","title":"RateLimitPolicy API v2","text":" - Feature Name:
rlp-v2
- Start Date: 2023-02-02
- RFC PR: Kuadrant/architecture#12
- Issue tracking: Kuadrant/architecture#13
"},{"location":"architecture/rfcs/0001-rlp-v2/#summary","title":"Summary","text":"Proposal of new API for the Kuadrant's RateLimitPolicy
(RLP) CRD, for improved UX.
"},{"location":"architecture/rfcs/0001-rlp-v2/#motivation","title":"Motivation","text":"The RateLimitPolicy
API (v1beta1), particularly its RateLimit
type used in ratelimitpolicy.spec.rateLimits
, designed in part to fit the underlying implementation based on the Envoy Rate limit filter, has been proven to be complex, as well as somewhat limiting for the extension of the API for other platforms and/or for supporting use cases of not contemplated in the original design.
Users of the RateLimitPolicy
will immediately recognize elements of Envoy's Rate limit API in the definitions of the RateLimit
type, with almost 1:1 correspondence between the Configuration
type and its counterpart in the Envoy configuration. Although compatibility between those continue to be desired, leaking such implementation details to the level of the API can be avoided to provide a better abstraction for activators (\"matchers\") and payload (\"descriptors\"), stated by users in a seamless way.
Furthermore, the Limit
type \u2013 used as well in the RLP's RateLimit
type \u2013 implies presently a logical relationship between its inner concepts \u2013 i.e. conditions and variables on one side, and limits themselves on the other \u2013 that otherwise could be shaped in a different manner, to provide clearer understanding of the meaning of these concepts by the user and avoid repetition. I.e., one limit definition contains multiple rate limits, and not the other way around.
"},{"location":"architecture/rfcs/0001-rlp-v2/#goals","title":"Goals","text":" - Decouple the API from the underlying implementation - i.e. provide a more generic and more user-friendly abstraction
- Prepare the API for upcoming changes in the Gateway API Policy Attachment specification
- Improve consistency of the API with respect to Kuadrant's AuthPolicy CRD - i.e. same language, similar UX
"},{"location":"architecture/rfcs/0001-rlp-v2/#current-wip-to-consider","title":"Current WIP to consider","text":" - Policy attachment update (kubernetes-sigs/gateway-api#1565)
- No merging of policies (kuadrant/architecture#10)
- A single Policy scoped to HTTPRoutes and HTTPRouteRule (kuadrant/architecture#4) - future
- Implement
skip_if_absent
for the RequestHeaders action (kuadrant/wasm-shim#29)
"},{"location":"architecture/rfcs/0001-rlp-v2/#highlights","title":"Highlights","text":" spec.rateLimits[]
replaced with spec.limits{<limit-name>: <limit-definition>}
spec.rateLimits.limits
replaced with spec.limits.<limit-name>.rates
spec.rateLimits.limits.maxValue
replaced with spec.limits.<limit-name>.rates.limit
spec.rateLimits.limits.seconds
replaced with spec.limits.<limit-name>.rates.duration
+ spec.limits.<limit-name>.rates.unit
spec.rateLimits.limits.conditions
replaced with spec.limits.<limit-name>.when
, structured field based on well-known selectors, mainly for expressing conditions not related to the HTTP route (although not exclusively) spec.rateLimits.limits.variables
replaced with spec.limits.<limit-name>.counters
, based on well-known selectors spec.rateLimits.rules
replaced with spec.limits.<limit-name>.routeSelectors
, for selecting (or \"sub-targeting\") HTTPRouteRules that trigger the limit - new matcher
spec.limits.<limit-name>.routeSelectors.hostnames[]
spec.rateLimits.configurations
removed \u2013 descriptor actions configuration (previously spec.rateLimits.configurations.actions
) generated from spec.limits.<limit-name>.when.selector
\u222a spec.limits.<limit-name>.counters
and unique identifier of the limit (associated with spec.limits.<limit-name>.routeSelectors
) - Limitador conditions composed of \"soft\"
spec.limits.<limit-name>.when
conditions + a \"hard\" condition that binds the limit to its trigger HTTPRouteRules
For detailed differences between current and new RLP API, see Comparison to current RateLimitPolicy.
"},{"location":"architecture/rfcs/0001-rlp-v2/#guide-level-explanation","title":"Guide-level explanation","text":""},{"location":"architecture/rfcs/0001-rlp-v2/#examples-of-rlps-based-on-the-new-api","title":"Examples of RLPs based on the new API","text":"Given the following network resources:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: Gateway\nmetadata:\n name: istio-ingressgateway\n namespace: istio-system\nspec:\n gatewayClassName: istio\n listeners:\n\n - hostname:\n - \"*.acme.com\"\n---\napiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
The following are examples of RLPs targeting the route and the gateway. Each example is independent from the other.
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-1-minimal-example-network-resource-targeted-entirely-without-filtering-unconditional-and-unqualified-rate-limiting","title":"Example 1. Minimal example - network resource targeted entirely without filtering, unconditional and unqualified rate limiting","text":"In this example, all traffic to *.toystore.acme.com
will be limited to 5rps, regardless of any other attribute of the HTTP request (method, path, headers, etc), without any extra \"soft\" conditions (conditions non-related to the HTTP route), across all consumers of the API (unqualified rate limiting).
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-infra-rl\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n base: # user-defined name of the limit definition - future use for handling hierarchical policy attachment\n\n - rates: # at least one rate limit required\n - limit: 5\n unit: second\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-infra-rl/base\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-infra-rl/base == \"1\"\n max_value: 5\n seconds: 1\n namespace: TDB\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-2-targeting-specific-route-rules-with-counter-qualifiers-multiple-rates-per-limit-definition-and-soft-conditions","title":"Example 2. Targeting specific route rules, with counter qualifiers, multiple rates per limit definition and \"soft\" conditions","text":"In this example, a distinct limit will be associated (\"bound\") to each individual HTTPRouteRule of the targeted HTTPRoute, by using the routeSelectors
field for selecting (or \"sub-targeting\") the HTTPRouteRule.
The following limit definitions will be bound to each HTTPRouteRule:
/toys*
\u2192 50rpm, enforced per username (counter qualifier) and only in case the user is not an admin (\"soft\" condition). /assets/*
\u2192 5rpm / 100rp12h
Each set of trigger matches in the RLP will be matched to all HTTPRouteRules whose HTTPRouteMatches is a superset of the set of trigger matches in the RLP. For every HTTPRouteRule matched, the HTTPRouteRule will be bound to the corresponding limit definition that specifies that trigger. In case no HTTPRouteRule is found containing at least one HTTPRouteMatch that is identical to some set of matching rules of a particular limit definition, the limit definition is considered invalid and reported as such in the status of RLP.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-endpoint\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toys:\n rates:\n\n - limit: 50\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n\n assets:\n rates:\n\n - limit: 5\n duration: 1\n unit: minute\n - limit: 100\n duration: 12\n unit: hour\n routeSelectors:\n - matches: # matches the 2nd HTTPRouteRule (i.e. /assets/*)\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/toys\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.group\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"group\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n- rules:\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/assets\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-endpoint/toys == \"1\"\n - auth.identity.group != \"admin\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 60\n namespace: kuadrant\n- conditions:\n - toystore/toystore-per-endpoint/assets == \"1\"\n max_value: 5\n seconds: 60\n namespace: kuadrant\n- conditions:\n - toystore/toystore-per-endpoint/assets == \"1\"\n max_value: 100\n seconds: 43200 # 12 hours\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-3-targeting-a-subset-of-an-httprouterule-httproutematch-missing","title":"Example 3. Targeting a subset of an HTTPRouteRule - HTTPRouteMatch missing","text":"Consider a 150rps rate limit set on requests to GET /toys/special
. Such specific application endpoint is covered by the first HTTPRouteRule in the HTTPRoute (as a subset of GET
or POST
to any path that starts with /toys
). However, to avoid binding limits to HTTPRouteRules that are more permissive than the actual intended scope of the limit, the RateLimitPolicy controller requires trigger matches to find identical matching rules explicitly defined amongst the sets of HTTPRouteMatches of the HTTPRouteRules potentially targeted.
As a consequence, by simply defining a trigger match for GET /toys/special
in the RLP, the GET|POST /toys*
HTTPRouteRule will NOT be bound to the limit definition. In order to ensure the limit definition is properly bound to a routing rule that strictly covers the GET /toys/special
application endpoint, first the user has to modify the spec of the HTTPRoute by adding an explicit HTTPRouteRule for this case:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n - matches: # new (more specific) HTTPRouteRule added\n - path:\n type: Exact\n value: \"/toys/special\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\n
After that, the RLP can target the new HTTPRouteRule strictly:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-special-toys\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n specialToys:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the new HTTPRouteRule (i.e. GET /toys/special)\n - path:\n type: Exact\n value: \"/toys/special\"\n method: GET\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys/special\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-special-toys/specialToys\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-special-toys/specialToys == \"1\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-4-targeting-a-subset-of-an-httprouterule-httproutematch-found","title":"Example 4. Targeting a subset of an HTTPRouteRule - HTTPRouteMatch found","text":"This example is similar to Example 3. Consider the use case of setting a 150rpm rate limit on requests to GET /toys*
.
The targeted application endpoint is covered by the first HTTPRouteRule in the HTTPRoute (as a subset of GET
or POST
to any path that starts with /toys
). However, unlike in the previous example where, at first, no HTTPRouteRule included an explicit HTTPRouteMatch for GET /toys/special
, in this example the HTTPRouteMatch for the targeted application endpoint GET /toys*
does exist explicitly in one of the HTTPRouteRules, thus the RateLimitPolicy controller would find no problem binding the limit definition to the HTTPRouteRule. That would nonetheless cause an unexpected behavior of the limit being triggered not strictly for GET /toys*
, but also for POST /toys*
.
To avoid extending the scope of the limit beyond desired, with no extra \"soft\" conditions, again the user must modify the spec of the HTTPRoute, so an exclusive HTTPRouteRule exists for the GET /toys*
application endpoint:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches: # first HTTPRouteRule split into two \u2013 one for GET /toys*, other for POST /toys*\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
The RLP can then target the new HTTPRouteRule strictly:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toy-readers\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toyReaders:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the new more specific HTTPRouteRule (i.e. GET /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toy-readers/toyReaders\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toy-readers/toyReaders == \"1\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-5-one-limit-triggered-by-multiple-httprouterules","title":"Example 5. One limit triggered by multiple HTTPRouteRules","text":"In this example, both HTTPRouteRules, i.e. GET|POST /toys*
and /assets/*
, are targeted by the same limit of 50rpm per username.
Because the HTTPRoute has no other rule, this is technically equivalent to targeting the entire HTTPRoute and therefore similar to Example 1. However, if the HTTPRoute had other rules or got other rules added afterwards, this would ensure the limit applies only to the two original route rules.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-user\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toysOrAssetsPerUsername:\n rates:\n\n - limit: 50\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-user/toysOrAssetsPerUsername\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-user/toysOrAssetsPerUsername == \"1\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 60\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-6-multiple-limit-definitions-targeting-the-same-httprouterule","title":"Example 6. Multiple limit definitions targeting the same HTTPRouteRule","text":"In case multiple limit definitions target a same HTTPRouteRule, all those limit definitions will be bound to the HTTPRouteRule. No limit \"shadowing\" will be be enforced by the RLP controller. Due to how things work as of today in Limitador nonetheless (i.e. the rule of the most restrictive limit wins), in some cases, across multiple limits triggered, one limit ends up \"shadowing\" others, depending on further qualification of the counters and the actual RL values.
E.g., the following RLP intends to set 50rps per username on GET /toys*
, and 100rps on POST /toys*
or /assets/*
:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-endpoint\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n readToys:\n rates:\n\n - limit: 50\n unit: second\n counters:\n - auth.identity.username\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n\n postToysOrAssets:\n rates:\n\n - limit: 100\n unit: second\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n - matches: # matches the 2nd HTTPRouteRule (i.e. /assets/*)\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/readToys\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/readToys\"\n descriptor_value: \"1\"\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/postToysOrAssets\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions: # actually applies to GET|POST /toys*\n - toystore/toystore-per-endpoint/readToys == \"1\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 1\n namespace: kuadrant\n- conditions: # actually applies to GET|POST /toys* and /assets/*\n - toystore/toystore-per-endpoint/postToysOrAssets == \"1\"\n max_value: 100\n seconds: 1\n namespace: kuadrant\n
This example was written this way only to highlight that it is possible for multiple limit definitions to select the same HTTPRouteRule. To avoid over-limiting between GET|POST /toys*
and thus ensure the originally intended limit definitions for each of these routes apply, the HTTPRouteRule should be split into two, as done in Example 4.
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-7-limits-triggered-for-specific-hostnames","title":"Example 7. Limits triggered for specific hostnames","text":"In the previous examples, the limit definitions and therefore the counters were set indistinctly for all hostnames \u2013 i.e. no matter if the request is sent to games.toystore.acme.com
or dolls.toystore.acme.com
, the same counters are expected to be affected. In this example, on the other hand, a 1000rpd rate limit is set for requests to /assets/*
only when the hostname matches games.toystore.acme.com
.
First, the user needs to edit the HTTPRoute to make the targeted hostname games.toystore.acme.com
explicit:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n - games.toystore.acme.com # new (more specific) hostname added\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
After that, the RLP can target specifically the newly added hostname:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-hostname\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n games:\n rates:\n\n - limit: 1000\n unit: day\n routeSelectors:\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n hostnames:\n - games.toystore.acme.com\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/assets/*\"]\n hosts: [\"games.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-hostname/games\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-hostname/games == \"1\"\n max_value: 1000\n seconds: 86400 # 1 day\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-8-targeting-the-gateway","title":"Example 8. Targeting the Gateway","text":"Note: Additional meaning and context may be given to this use case in the future, when discussing defaults and overrides.
Targeting a Gateway is a shortcut to targeting all individual HTTPRoutes referencing the gateway as parent. This differs from Example 1 nonetheless because, by targeting the gateway rather than an individual HTTPRoute, the RLP applies automatically to all HTTPRoutes pointing to the gateway, including routes created before and after the creation of the RLP. Moreover, all those routes will share the same limit counters specified in the RLP.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: gw-rl\n namespace: istio-ingressgateway\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: istio-ingressgateway\n limits:\n base:\n\n - rates:\n - limit: 5\n unit: second\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"istio-system/gw-rl/base\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - istio-system/gw-rl/base == \"1\"\n max_value: 5\n seconds: 1\n namespace: TBD\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#comparison-to-current-ratelimitpolicy","title":"Comparison to current RateLimitPolicy","text":"Current New Reason 1:1 relation between Limit (the object) and the actual Rate limit (the value) (spec.rateLimits.limits
) Rate limit becomes a detail of Limit where each limit may define one or more rates (1:N) (spec.limits.<limit-name>.rates
) - It allows to reuse
when
conditions and counters
for groups of rate limits
Parsed spec.rateLimits.limits.conditions
field, directly exposing the Limitador's API Structured spec.limits.<limit-name>.when
condition field composed of 3 well-defined properties: selector
, operator
and value
- Feels more K8s-native
- Consistent with github.com/kuadrant/authorino/api/v1beta1#JSONPatternExpression
- No need for a parser (only if implemented by Limitador)
spec.rateLimits.configurations
as a list of \"variables assignments\" and direct exposure of Envoy's RL descriptor actions API Descriptor actions composed from selectors used in the limit definitions (spec.limits.<limit-name>.when.selector
and spec.limits.<limit-name>.counters
) plus a fixed identifier of the route rules (spec.limits.<limit-name>.routeSelectors
) - Abstract the Envoy-specific concepts of \"actions\" and \"descriptors\"
- No risk of mismatching descriptors keys between \"actions\" and actual usage in the limits
- No user-defined generic descriptors (e.g. \"limited = 1\")
- Source value of the selectors defined from an implicit \"context\" data structure
Key-value descriptors Structured descriptors referring to a contextual well-known data structure - Consistent with Authorino's Authorization JSON (#context)
Limitador conditions independent from the route rules Artificial Limitador condition injected to bind routes and corresponding limits - Ensure the limit is enforced only for corresponding selected HTTPRouteRules
translate(spec.rateLimits.rules) \u2282 httproute.spec.rules
spec.limits.<limit-name>.routeSelectors.matches \u2286 httproute.spec.rules.matches
- HTTPRouteRule selector (via HTTPRouteMatch subset)
- Gateway API language
- Preparation for inherited policies and defaults & overrides
spec.rateLimits.limits.seconds
spec.limits.<limit-name>.rates.duration
and spec.limits.<limit-name>.rates.unit
- Support for more units beyond seconds
duration: 1
by default
spec.rateLimits.limits.variables
spec.limits.<limit-name>.counters
- Improved (more specific) naming
spec.rateLimits.limits.maxValue
spec.limits.<limit-name>.rates.limit
- Improved (more generic) naming
"},{"location":"architecture/rfcs/0001-rlp-v2/#reference-level-explanation","title":"Reference-level explanation","text":"By completely dropping out the configurations
field from the RLP, composing the RL descriptor actions is now done based essentially on the selectors listed in the when
conditions and the counters
, plus an artificial condition used to bind the HTTPRouteRules to the corresponding limits to trigger in Limitador.
The descriptor actions composed from the selectors in the \"soft\" when
conditions and counter qualifiers originate from the direct references these selectors make to paths within a well-known data structure that stores information about the context (HTTP request and ext-authz filter). These selectors in \"soft\" when
conditions and counter qualifiers are thereby called well-known selectors.
Other descriptor actions might be composed by the RLP controller to define additional RL conditions to bind HTTPRouteRules and corresponding limits.
"},{"location":"architecture/rfcs/0001-rlp-v2/#well-known-selectors","title":"Well-known selectors","text":"Each selector used in a when
condition or counter qualifier is a direct reference to a path within a well-known data structure that stores information about the context
(L4 and L7 data of the original request handled by the proxy), as well as auth
data (dynamic metadata occasionally exported by the external authorization filter and injected by the proxy into the rate-limit filter).
The well-known data structure for building RL descriptor actions resembles Authorino's \"Authorization JSON\", whose context
component consists of Envoy's AttributeContext
type of the external authorization API (marshalled as JSON). Compared to the more generic RateLimitRequest
struct, the AttributeContext
provides a more structured and arguably more intuitive relation between the data sources for the RL descriptor actions and the corresponding key names through which the values are referred to within the RLP, in a context of predominantly serving HTTP applications.
To keep compatibility with the Envoy Rate Limit API, the well-known data structure can optionally be extended with the RateLimitRequest
, thus resulting in the following final structure.
context: # Envoy's Ext-Authz `CheckRequest.AttributeContext` type\n source:\n address: \u2026\n service: \u2026\n \u2026\n destination:\n address: \u2026\n service: \u2026\n \u2026\n request:\n http:\n host: \u2026\n path: \u2026\n method: \u2026\n headers: {\u2026}\n\nauth: # Dynamic metadata exported by the external authorization service\n\nratelimit: # Envoy's Rate Limit `RateLimitRequest` type\n domain: \u2026 # generated by the Kuadrant controller\n descriptors: {\u2026} # descriptors configured by the user directly in the proxy (not generated by the Kuadrant controller, if allowed)\n hitsAddend: \u2026 # only in case we want to allow users to refer to this value in a policy\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#mechanics-of-generating-rl-descriptor-actions","title":"Mechanics of generating RL descriptor actions","text":"From the perspective of a user who writes a RLP, the selectors used in then when
and counters
fields are paths to the well-known data structure (see Well-known selectors). While desiging a policy, the user intuitively pictures the well-known data structure and states each limit definition having in mind the possible values assumed by each of those paths in the data plane. For example,
The user story:
Each distinct user (auth.identity.username
) can send no more than 1rps to the same HTTP path (context.request.http.path
).
...materializes as the following RLP:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n dolls:\n rates:\n\n - limit: 1\n unit: second\n counters:\n - auth.identity.username\n - context.request.http.path\n
The following selectors are to be interpreted by the RLP controller:
auth.identity.username
context.request.http.path
The RLP controller uses a map to translate each selector into its corresponding descriptor action. (Roughly described:)
context.source.address \u2192 source_cluster(...) # TBC\ncontext.source.service \u2192 source_cluster(...) # TBC\ncontext.destination... \u2192 destination_cluster(...)\ncontext.destination... \u2192 destination_cluster(...)\ncontext.request.http.<X> \u2192 request_headers(header_name: \":<X>\")\ncontext.request... \u2192 ...\nauth.<X> \u2192 metadata(key: \"envoy.filters.http.ext_authz\", path: <X>)\nratelimit.domain \u2192 <hostname>\n
...to yield effectively:
rate_limits:\n\n- actions:\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n - request_headers:\n descriptor_key: \"context.request.http.path\"\n header_name: \":path\"\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#artificial-limitador-condition-for-routeselectors","title":"Artificial Limitador condition for routeSelectors
","text":"For each limit definition that explicitly or implicitly defines a routeSelectors
field, the RLP controller will generate an artificial Limitador condition that ensures the limit applies only when the filtered rules are honoured while serving the request. This can be implemented with a 2-step procedure:
- generate a unique identifier of the limit – i.e.
<policy-namespace>/<policy-name>/<limit-name>
- associate a
generic_key
type descriptor action with each HTTPRouteRule
targeted by the limit \u2013 i.e. { descriptor_key: <unique identifier of the limit>, descriptor_value: \"1\" }
.
For example, given the following RLP:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-non-admin-users\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toys:\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n rates:\n - limit: 50\n duration: 1\n unit: minute\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n\n assets:\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n rates:\n - limit: 5\n duration: 1\n unit: minute\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n
Apart from the following descriptor action associated with both routes:
- metadata:\n descriptor_key: \"auth.identity.group\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"group\"\n
...and its corresponding Limitador condition:
auth.identity.group != \"admin\"\n
The following additional artificial descriptor actions will be generated:
# associated with route rule GET|POST /toys*\n\n- generic_key:\n descriptor_key: \"toystore/toystore-non-admin-users/toys\"\n descriptor_value: \"1\"\n\n# associated with route rule /assets/*\n\n- generic_key:\n descriptor_key: \"toystore/toystore-non-admin-users/assets\"\n descriptor_value: \"1\"\n
...and their corresponding Limitador conditions.
In the end, the following Limitador configuration is yielded:
- conditions:\n - toystore/toystore-non-admin-users/toys == \"1\"\n - auth.identity.group != \"admin\"\n max_value: 50\n seconds: 60\n namespace: kuadrant\n\n\n- conditions:\n - toystore/toystore-non-admin-users/assets == \"1\"\n - auth.identity.group != \"admin\"\n max_value: 5\n seconds: 60\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#support-in-wasm-shim-and-envoy-rl-api","title":"Support in wasm shim and Envoy RL API","text":"This proposal tries to keep compatibility with the Envoy API for rate limit and does not introduce any new requirement that otherwise would require the use of wasm shim to be implemented.
If this proposal is implemented in the wasm shim, all types of matchers supported by the HTTPRouteMatch type of Gateway API must also be supported in the rate_limit_policies.gateway_actions.rules
field of the wasm plugin configuration. These include matchers based on path (prefix, exact), headers, query string parameters and method.
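As an illustration only, here is a sketch of how such matchers might surface in the wasm plugin configuration. The headers and query_params fields below are hypothetical, not part of the current configuration schema; they only indicate the kind of support the shim would need to add:

```yaml
gateway_actions:
- rules:
  - paths: ["/toys*"]              # existing: path (prefix/exact) matcher
    methods: ["GET"]               # existing: method matcher
    hosts: ["*.toystore.acme.com"]
    headers:                       # hypothetical: HTTPHeaderMatch-style matcher
    - name: x-canary
      value: "true"
    query_params:                  # hypothetical: HTTPQueryParamMatch-style matcher
    - name: version
      value: beta
```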
"},{"location":"architecture/rfcs/0001-rlp-v2/#drawbacks","title":"Drawbacks","text":"HTTPRoute editing occasionally required Need to duplicate rules that don't explicitly include a matcher wanted for the policy, so that matcher can be added as a special case for each of those rules.
Risk of over-targeting Some HTTPRouteRules might need to be split into more specific ones so a limit definition is not bound to beyond intended (e.g. target method: GET
when the route matches method: POST|GET
).
Prone to consistency issues Typos and updates to the HTTPRoute can easily cause a mismatch and invalidate a RLP.
Two types of conditions \u2013 routeSelectors
and when
conditions Although with different meanings (evaluates in the gateway vs. evaluated in Limitador) and meant for expressing different types of rules (HTTPRouteRule selectors vs. \"soft\" conditions based on attributes not related to the HTTP request), users might still perceive these as two ways of expressing conditions and find difficult to understand at first that \"soft\" conditions do not accept expressions related to attributes of the HTTP request.
"},{"location":"architecture/rfcs/0001-rlp-v2/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0001-rlp-v2/#targeting-full-httprouterules","title":"Targeting full HTTPRouteRules","text":"Requiring users to specify full HTTPRouteRule matches in the RLP (as opposed to any subset of HTTPRoureMatches of targeted HTTPRouteRules \u2013 current proposal) contains some of the same drawbacks of this proposal, such as HTTPRoute editing occasionally required and prone to consistency issues. If, on one hand, it eliminates the risk of over-targeting, on the other hand, it does it at the cost of requiring excessively verbose policies written by the users, to the point of sometimes expecting user to have to specify trigger matching rules that are significantly more than what's originally and strictly intended.
E.g.:
On a HTTPRoute that contains the following HTTPRouteRules (simplified representation):
{ header: x-canary=true } \u2192 backend-canary\n{ * } \u2192 backend-rest\n
Where the user wants to define a RLP that targets { method: POST }
. First, the user needs to edit the HTTPRoute and duplicate the HTTPRouteRules:
{ header: x-canary=true, method: POST } \u2192 backend-canary\n{ header: x-canary=true } \u2192 backend-canary\n{ method: POST } \u2192 backend-rest\n{ * } \u2192 backend-rest\n
Then, user needs to include the following trigger in the RLP so only full HTTPRouteRules are specified:
{ header: x-canary=true, method: POST }\n{ method: POST }\n
The first matching rule of the trigger (i.e. { header: x-canary=true, method: POST }
) is beyond the original user intent of targeting simply { method: POST }
.
This issue can be even more concerning in the case of targeting gateways with multiple child HTTPRoutes. All the HTTPRoutes would have to be fixed, and the HTTPRouteRules covering all the cases in all HTTPRoutes would have to be listed in the policy targeting the gateway.
"},{"location":"architecture/rfcs/0001-rlp-v2/#all-limit-definitions-apply-vs-limit-shadowing","title":"All limit definitions apply vs. Limit \"shadowing\"","text":"The proposed binding between limit definition and HTTPRouteRules that trigger the limits was thought so multiple limit definitions can be bound to a same HTTPRouteRule that triggers those limits in Limitador. That means that no limit definition will \"shadow\" another at the level of the RLP controller, i.e. the RLP controller will honour the intended binding according to the selectors specified in the policy.
Due to how things work as of today in Limitador nonetheless, i.e., the rule of the most restrictive limit wins, and because all limit definitions triggered by a given shared HTTPRouteRule, it might be the case that, across multiple limits triggered, one limit ends up \"shadowing\" other limits. However, that is by implementation of Limitador and therefore beyond the scope of the API.
An alternative to the approach of allowing all limit definitions to be bound to a same selected HTTPRouteRules would be enforcing that, amongst multiple limit definitions targeting a same HTTPRouteRule, only the first of those limits definitions is bound to the HTTPRouteRule. This alternative approach effectively would cause the first limit to \"shadow\" any other on that particular HTTPRouteRule, as by implementation of the RLP controller (i.e., at API level).
While the first approach causes an artificial Limitador condition of the form <policy-ns>/<policy-name>/<limit-name> == \"1\"
, the alternative approach (\"limit shadowing\") could be implemented by generating a descriptor of the following form instead: ratelimit.binding == \"<policy-ns>/<policy-name>/<limit-name>\"
.
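For comparison, here is a minimal sketch of the Limitador conditions each approach would yield for two limits bound to the same HTTPRouteRule, borrowing the names from Example 6 (the ratelimit.binding descriptor is the hypothetical alternative, not part of the proposal):

```yaml
# Adopted approach: all bound limits apply; each gets its own artificial condition
- conditions:
  - toystore/toystore-per-endpoint/readToys == "1"
  max_value: 50
- conditions:
  - toystore/toystore-per-endpoint/postToysOrAssets == "1"
  max_value: 100

# Alternative ("limit shadowing"): a single binding descriptor, so only the
# first limit bound to the route rule would ever match
- conditions:
  - ratelimit.binding == "toystore/toystore-per-endpoint/readToys"
  max_value: 50
```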
The downside of allowing multiple bindings to the same HTTPRouteRule is that all limits apply in Limitador, thus frequently making status reporting harder. The most restrictive rate limit strategy implemented by Limitador might not be obvious to users who set multiple limit definitions, and it will require additional information reported back to the user about the actual status of the limit definitions stated in an RLP. On the other hand, it enables use cases of different limit definitions that vary on the counter qualifiers, additional "soft" conditions, or actual rate limit values to be triggered by the same HTTPRouteRule.
"},{"location":"architecture/rfcs/0001-rlp-v2/#writing-soft-when-conditions-based-on-attributes-of-the-http-request","title":"Writing \"soft\" when
conditions based on attributes of the HTTP request","text":"As a first step, users will not be able to write \"soft\" when
conditions to selectively apply rate limit definitions based on attributes of the HTTP request that otherwise could be specified using the routeSelectors
field of the RLP instead.
On one hand, using when
conditions for route filtering would make it easy to define limits when the HTTPRoute cannot be modified to include the special rule. On the other hand, users would miss information in the status. An HTTPRouteRule for GET|POST /toys*
, for example, that is targeted with an additional \"soft\" when
condition that specifies that the method must be equal to GET
and the path exactly equal to /toys/special
(see Example 3) would be reported as rate limited with extra details that this is in fact only for GET /toys/special
. For small deployments, this might be considered acceptable; however, it would easily explode to an unmanageable number of cases for deployments with more than just a few limit definitions and HTTPRouteRules.
Moreover, by not specifying a more strict HTTPRouteRule for GET /toys/special
, the RLP controller would bind the limit definition to other rules that would cause the rate limit filter to invoke the rate limit service (Limitador) for cases other than strictly GET /toys/special
. Even if the rate limits would still be ensured to apply in Limitador only for GET /toys/special
(due to the presence of a hypothetical \"soft\" when
condition), an extra no-op hop to the rate limit service would happen. This is avoided with the currently imposed limitation.
Example of \"soft\" when
conditions for rate limit based on attributes of the HTTP request (NOT SUPPORTED):
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-special-toys\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n specialToys:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the original HTTPRouteRule GET|POST /toys*\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n when:\n - selector: context.request.http.method # cannot omit this selector or POST /toys/special would also be rate limited\n operator: eq\n value: GET\n - selector: context.request.http.path\n operator: eq\n value: /toys/special\n
How would this RLP be implemented under the hood if supported? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-special-toys/specialToys\"\n descriptor_value: \"1\"\n - request_headers:\n descriptor_key: \"context.request.http.method\"\n header_name: \":method\"\n - request_headers:\n descriptor_key: \"context.request.http.path\"\n header_name: \":path\"\n
limits:\n\n- conditions:\n - toystore/toystore-special-toys/specialToys == \"1\"\n - context.request.http.method == \"GET\"\n - context.request.http.path == \"/toys/special\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#possible-variations-for-the-selectors-conditions-and-counter-qualifiers","title":"Possible variations for the selectors (conditions and counter qualifiers)","text":"The main drivers behind the proposed design for the selectors (conditions and counter qualifiers), based on (i) structured condition expressions composed of fields selector
, operator
, and value
, and (ii) when
conditions and counters
separated in two distinct fields (variation \"C\" below), are:
- consistency with the Authorino
AuthConfig
API, which also specifies when
conditions expressed in selector
, operator
, and value
fields; - explicit user intent, without subtle distinction of meaning based on presence of optional fields.
Nonetheless here are a few alternative variations to consider:
A (structured condition expressions, single field):
selectors:
- selector: context.request.http.method
  operator: eq
  value: GET
- selector: auth.identity.username

B (parsed condition expressions, single field):
selectors:
- context.request.http.method == "GET"
- auth.identity.username

C ⭐️ (structured condition expressions, distinct fields):
when:
- selector: context.request.http.method
  operator: eq
  value: GET
counters:
- auth.identity.username

D (parsed condition expressions, distinct fields):
when:
- context.request.http.method == "GET"
counters:
- auth.identity.username
\u2b50\ufe0f Variation adopted for the examples and (so far) final design proposal.
"},{"location":"architecture/rfcs/0001-rlp-v2/#prior-art","title":"Prior art","text":"Most implementations currently orbiting around Gateway API (e.g. Istio, Envoy Gateway, etc) for added RL functionality seem to have been leaning more to the direct route extension pattern instead of Policy Attachment. That might be an option particularly suitable for gateway implementations (gateway providers) and for those aiming to avoid dealing with defaults and overrides.
"},{"location":"architecture/rfcs/0001-rlp-v2/#unresolved-questions","title":"Unresolved questions","text":" - In case a limit definition lists route selectors such that some can be bound to HTTPRouteRules and some cannot (see Example 6), do we bind the valid route selectors and ignore the invalid ones or the limit definition is invalid altogether and bound to no HTTPRouteRule at all? A: By allowing multiple limit definitions to target a same HTTPRouteRule, the issue here stated will become less often. For the other cases where a limit definition still fails to select an HTTPRouteRule (e.g. due to mismatching trigger matches), the limit definition is not considered invalid. Possibly the limit definitions is considered \"stale\" (or \"orphan\"), i.e., not bound to any HTTPRouteRule.
- What should we fill domain/namespace with, if no longer with the hostname? This can be useful for multi-tenancy. A: For now, the domain/namespace field of the RL configuration (Envoy and Limitador ends) will be filled with a fixed (configurable) string (e.g. \"kuadrant\"). This can change in the future to better support multi-tenancy and/or other use cases where a total sharding of the limit definitions within the same instance of Kuadrant is desired.
- How do we support lists of hostnames in Limitador conditions (single counter)? Should we open an issue for a new
in
operator? A: Not needed. The hostnames must exist in the targeted object explicitly, just like any other routing rules intended to be targeted by a limit definition. By setting the explicit hostname in the targeted network object (Gateway or HTTPRoute), it also becomes a routing rule available for \"hard\" trigger configuration. - What \"soft\" condition
operator
s do we need to support (e.g. eq
, neq
, exists
, nexists
, matches
)? - Do we need special field to define shared counters across clusters/Limitador instances or that's to be solved at another layer (
Limitador
, Kuadrant
CRDs, MCTC)?
"},{"location":"architecture/rfcs/0001-rlp-v2/#future-possibilities","title":"Future possibilities","text":" - Port
routeSelectors
and the semantics around it to the AuthPolicy
API (aka \"KAP v2\"). - Defaults and overrides, either along the lines of architecture#4 or architecture#10.
"},{"location":"architecture/rfcs/0002-well-known-attributes/","title":"Well-known Attributes","text":" - Feature Name:
well-known-attributes
- Start Date: 2023-06-13
- RFC PR: Kuadrant/architecture#17
- Issue tracking: Kuadrant/architecture#53
"},{"location":"architecture/rfcs/0002-well-known-attributes/#summary","title":"Summary","text":"Define a well-known structure for users to declare request data selectors in their RateLimitPolicies and AuthPolicies. This structure is referred to as the Kuadrant Well-known Attributes.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#motivation","title":"Motivation","text":"The well-known attributes let users write policy rules \u2013 conditions and, in general, dynamic values that refer to attributes in the data plane - in a concise and seamless way.
Decoupled from the policy CRDs, the well-known attributes:
- define a common language for referring to values of the data plane in the Kuadrant policies;
- allow dynamically evolving the policy APIs regarding how they admit references to data plane attributes;
- encompass all common and component-specific selectors for data plane attributes;
- have a single and unified specification, although this specification may occasionally link to additional, component-specific, external docs.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#guide-level-explanation","title":"Guide-level explanation","text":"One who writes a Kuadrant policy and wants to build policy constructs such as conditions, qualifiers, variables, etc, based on dynamic values of the data plane, must refer the attributes that carry those values, using the declarative language of Kuadrant's Well-known Attributes.
A dynamic data plane value is typically a value of an attribute of the request or an Envoy Dynamic Metadata entry. It can be a value of the outer request being handled by the API gateway or proxy that is managed by Kuadrant (\"context request\") or an attribute of the direct request to the Kuadrant component that delivers the functionality in the data plane (rate-limiting or external auth).
A Well-known Selector is a construct of a policy API whose value contains a direct reference to a well-known attribute. The language of the well-known attributes and therefore what one would declare within a well-known selector resembles a JSON path for navigating a possibly complex JSON object.
Example 1. Well-known selector used in a condition
apiGroup: examples.kuadrant.io\nkind: PaintPolicy\nspec:\n rules:\n\n - when:\n - selector: auth.identity.group\n operator: eq\n value: admin\n color: red\n
In the example, auth.identity.group
is a well-known selector of an attribute group
, known to be injected by the external authorization service (auth
) to describe the group the user (identity
) belongs to. In the data plane, whenever this value is equal to admin
, the abstract PaintPolicy
policy states that the traffic must be painted red
.
Example 2. Well-known selector used in a variable
apiGroup: examples.kuadrant.io\nkind: PaintPolicy\nspec:\n rules:\n\n - color: red\n alpha:\n dynamic: request.headers.x-color-alpha\n
In the example, request.headers.x-color-alpha
is a selector of a well-known attribute request.headers
that gives access to the headers of the context HTTP request. The selector retrieves the value of the x-color-alpha
request header to dynamically fill the alpha
property of the abstract PaintPolicy
policy at each request.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#reference-level-explanation","title":"Reference-level explanation","text":"The Well-known Attributes are a compilation inspired by some of the Envoy attributes and Authorino's Authorization JSON and its related JSON paths.
From the Envoy attributes, only attributes that are available before establishing connection with the upstream server qualify as a Kuadrant well-known attribute. This excludes attributes such as the response attributes and the upstream attributes.
As for the attributes inherited from Authorino, these are either based on Envoy's AttributeContext
type of the external auth request API or from internal types defined by Authorino to fulfill the Auth Pipeline.
These two subsets of attributes are unified into a single set of well-known attributes. For each attribute that exists in both subsets, the name of the attribute as specified in the Envoy attributes subset prevails. Example of such is request.id
(to refer to the ID of the request) superseding context.request.http.id
(as the same attribute is referred in an Authorino AuthConfig
).
The next sections specify the well-known attributes organized in the following groups:
- Request attributes
- Connection attributes
- Metadata and filter state attributes
- Auth attributes
- Rate-limit attributes
"},{"location":"architecture/rfcs/0002-well-known-attributes/#request-attributes","title":"Request attributes","text":"The following attributes are related to the context HTTP request that is handled by the API gateway or proxy managed by Kuadrant.
Attribute
Type
Description
Auth
RL
request.id
String
Request ID corresponding to x-request-id
header value
\u2713
\u2713
request.time
Timestamp
Time of the first byte received
\u2713
\u2713
request.protocol
String
Request protocol (\u201cHTTP/1.0\u201d, \u201cHTTP/1.1\u201d, \u201cHTTP/2\u201d, or \u201cHTTP/3\u201d)
\u2713
\u2713
request.scheme
String
The scheme portion of the URL e.g. \u201chttp\u201d
\u2713
\u2713
request.host
String
The host portion of the URL
\u2713
\u2713
request.method
String
Request method e.g. \u201cGET\u201d
\u2713
\u2713
request.path
String
The path portion of the URL
\u2713
\u2713
request.url_path
String
The path portion of the URL without the query string
\u2713
request.query
String
The query portion of the URL in the format of \u201cname1=value1&name2=value2\u201d
\u2713
\u2713
request.headers
Map<String, String>
All request headers indexed by the lower-cased header name
\u2713
\u2713
request.referer
String
Referer request header
\u2713
request.useragent
String
User agent request header
\u2713
request.size
Number
The HTTP request size in bytes. If unknown, it must be -1
\u2713
request.body
String
The HTTP request body. (Disabled by default. Requires additional proxy configuration to enabled it.)
\u2713
request.raw_body
Array<Number>
The HTTP request body in bytes. This is sometimes used instead of body
depending on the proxy configuration.
\u2713
request.context_extensions
Map<String, String>
This is analogous to request.headers
, however these contents are not sent to the upstream server. It provides an extension mechanism for sending additional information to the auth service without modifying the proto definition. It maps to the internal opaque context in the proxy filter chain. (Requires additional configuration in the proxy.)
\u2713
"},{"location":"architecture/rfcs/0002-well-known-attributes/#connection-attributes","title":"Connection attributes","text":"The following attributes are available once the downstream connection with the API gateway or proxy managed by Kuadrant is established. They apply to HTTP requests (L7) as well, but also to proxied connections limited at L3/L4.
Attribute
Type
Description
Auth
RL
source.address
String
Downstream connection remote address
\u2713
\u2713
source.port
Number
Downstream connection remote port
\u2713
\u2713
source.service
String
The canonical service name of the peer
\u2713
source.labels
Map<String, String>
The labels associated with the peer. These could be pod labels for Kubernetes or tags for VMs. The source of the labels could be an X.509 certificate or other configuration.
\u2713
source.principal
String
The authenticated identity of this peer. If an X.509 certificate is used to assert the identity in the proxy, this field is sourced from \u201cURI Subject Alternative Names\u201c, \u201cDNS Subject Alternate Names\u201c or \u201cSubject\u201c in that order. The format is issuer specific \u2013 e.g. SPIFFE format is spiffe://trust-domain/path
, Google account format is https://accounts.google.com/{userid}
.
\u2713
source.certificate
String
The X.509 certificate used to authenticate the identify of this peer. When present, the certificate contents are encoded in URL and PEM format.
\u2713
destination.address
String
Downstream connection local address
\u2713
\u2713
destination.port
Number
Downstream connection local port
\u2713
\u2713
destination.service
String
The canonical service name of the peer
\u2713
destination.labels
Map<String, String>
The labels associated with the peer. These could be pod labels for Kubernetes or tags for VMs. The source of the labels could be an X.509 certificate or other configuration.
\u2713
destination.principal
String
The authenticated identity of this peer. If an X.509 certificate is used to assert the identity in the proxy, this field is sourced from \u201cURI Subject Alternative Names\u201c, \u201cDNS Subject Alternate Names\u201c or \u201cSubject\u201c in that order. The format is issuer specific \u2013 e.g. SPIFFE format is spiffe://trust-domain/path
, Google account format is https://accounts.google.com/{userid}
.
\u2713
destination.certificate
String
The X.509 certificate used to authenticate the identify of this peer. When present, the certificate contents are encoded in URL and PEM format.
\u2713
connection.id
Number
Downstream connection ID
\u2713
connection.mtls
Boolean
Indicates whether TLS is applied to the downstream connection and the peer ceritificate is presented
\u2713
connection.requested_server_name
String
Requested server name in the downstream TLS connection
\u2713
connection.tls_session.sni
String
SNI used for TLS session
\u2713
connection.tls_version
String
TLS version of the downstream TLS connection
\u2713
connection.subject_local_certificate
String
The subject field of the local certificate in the downstream TLS connection
\u2713
connection.subject_peer_certificate
String
The subject field of the peer certificate in the downstream TLS connection
\u2713
connection.dns_san_local_certificate
String
The first DNS entry in the SAN field of the local certificate in the downstream TLS connection
\u2713
connection.dns_san_peer_certificate
String
The first DNS entry in the SAN field of the peer certificate in the downstream TLS connection
\u2713
connection.uri_san_local_certificate
String
The first URI entry in the SAN field of the local certificate in the downstream TLS connection
\u2713
connection.uri_san_peer_certificate
String
The first URI entry in the SAN field of the peer certificate in the downstream TLS connection
\u2713
connection.sha256_peer_certificate_digest
String SHA256 digest of the peer certificate in the downstream TLS connection if present
\u2713
"},{"location":"architecture/rfcs/0002-well-known-attributes/#metadata-and-filter-state-attributes","title":"Metadata and filter state attributes","text":"The following attributes are related to the Envoy proxy filter chain. They include metadata exported by the proxy throughout the filters and information about the states of the filters themselves.
Attribute
Type
Description
Auth
RL
metadata
Metadata
Dynamic request metadata
\u2713
\u2713
filter_state
Map<String, String>
Mapping from a filter state name to its serialized string value
\u2713
"},{"location":"architecture/rfcs/0002-well-known-attributes/#auth-attributes","title":"Auth attributes","text":"The following attributes are exclusive of the external auth service (Authorino).
Attribute
Type
Description
Auth
RL
auth.identity
Any
Single resolved identity object, post-identity verification
\u2713
auth.metadata
Map<String, Any>
External metadata fetched
\u2713
auth.authorization
Map<String, Any>
Authorization results resolved by each authorization rule, access granted only
\u2713
auth.response
Map<String, Any>
Response objects exported by the auth service post-access granted
\u2713
auth.callbacks
Map<String, Any>
Response objects returned by the callback requests issued by the auth service
\u2713
The auth service also supports modifying selected values by chaining modifiers in the path.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#rate-limit-attributes","title":"Rate-limit attributes","text":"The following attributes are exclusive of the rate-limiting service (Limitador).
Attribute
Type
Description
Auth
RL
ratelimit.domain
String
The rate limit domain. This enables the configuration to be namespaced per application (multi-tenancy).
\u2713
ratelimit.hits_addend
Number
Specifies the number of hits a request adds to the matched limit. Fixed value: `1`. Reserved for future usage.
\u2713
"},{"location":"architecture/rfcs/0002-well-known-attributes/#drawbacks","title":"Drawbacks","text":"The decoupling of the well-known attributes and the language of well-known attributes and selectors from the individual policy CRDs is what makes it somewhat flexible and common across the components (rate-limiting and auth). However, it's less structured and it introduces another syntax for users to get familiar with.
This additional language competes with the language of the route selectors (RFC 0001), based on Gateway API's HTTPRouteMatch
type.
Being \"soft-coded\" in the policy specs (as opposed to a hard-coded sub-structure inside of each policy type) does not mean it's completely decoupled from implementation in the control plane and/or intermediary data plane components. Although many attributes can be supported almost as a pass-through, from being used in a selector in a policy, to a corresponding value requested by the wasm-shim to its host, that is not always the case. Some translation may be required for components not integrated via wasm-shim (e.g. Authorino), as well as for components integrated via wasm-shim (e.g. Limitador) in special cases of composite or abstraction well-known attributes (i.e. attributes not available as-is via ABI, e.g. auth.identity
in an RLP). Either way, some validation of the values introduced by users in the selectors may be needed at some point in the control plane, thus arguably requiring a level of awareness and coupling between the well-known selectors specification and the control plane (policy controllers) or intermediary data plane (wasm-shim) components.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#rationale-and-alternatives","title":"Rationale and alternatives","text":"As an alternative to JSON path-like selectors based on a well-known structure that induces the proposed language of well-known attributes, these same attributes could be defined as sub-types of each policy CRD. The Golang packages defining the common attributes across CRDs could be shared by the policy type definitions to reduce repetition. However, that approach would possibly involve a staggering number of new type definitions to cover all the cases for all the groups of attributes to be supported. These are constructs that not only need to be understood by the policy controllers, but also known by the user who writes a policy.
Additionally, all attributes, including new attributes occasionally introduced by Envoy and made available to the wasm-shim via ABI, would always require translation from the user-level abstraction how it's represented in a policy, to the actual form how it's used in the wasm-shim configuration and Authorino AuthConfigs.
Not implementing this proposal and keeping the current state of things mean little consistency between these common constructs for rules and conditions on how they are represented in each type of policy. This lack of consistency has a direct impact on the overhead faced by users to learn how to interact with Kuadrant and write different kinds of policies, as well as for the maintainers on tasks of coding for policy validation and reconciliation of data plane configurations.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#prior-art","title":"Prior art","text":"Authorino's dynamic JSON paths, related to Authorino's Authorization JSON and used in when
conditions and inside of multiple other constructs of the AuthConfig, are an example of feature of very similar approach to the one proposed here.
Arguably, Authorino's perceived flexibility would not have been possible with the Authorization JSON selectors. Users can write quite sophisticated policy rules (conditions, variable references, etc) by leveraging the those dynamic selectors. Because they are backed by JSON-based machinery in the code, Authorino's selectors have very little to, in some cases, none at all variation compared Open Policy Agent's Rego policy language, which is often used side by side in the same AuthConfigs.
Authorino's Authorization JSON selectors are, in one hand, more restrict to the structure of the CheckRequest
payload (context.*
attributes). At the same time, they are very open in the part associated with the internal attributes built along the Auth Pipeline (i.e. auth.*
attributes). That makes Authorino's Authorization JSON selectors more limited, compared to the Envoy attributes made available to the wasm-shim via ABI, but also harder to validate. In some cases, such as of deep references to inside objects fetched from external sources of metadata, resolved OPA objects, JWT claims, etc, it is impossible to validate for correct references.
Another experience learned from Authorino's Authorization JSON selectors is that they depend substantially on the so-called \"modifiers\". Many use cases involving parsing and breaking down attributes that are originally available in a more complex form would not be possible without the modifiers. Examples of such cases are: extracting portions of the path and/or query string parameters (e.g. collection and resource identifiers), applying translations on HTTP verbs into corresponding operations, base64-decoding values from the context HTTP request, amongst several others.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#unresolved-questions","title":"Unresolved questions","text":" -
How to deal with the differences regarding the availability and data types of the attributes across clients/hosts?
-
Can we make more attributes that are currently available to only one of the components common to both?
-
Will we need some kind of global support for modifiers (functions) in the well-known selectors or those can continue to be an Authorino-only feature?
-
Does Authorino, which is more strict regarding the data structure that induces the selectors, need to implement this specification or could/should it keep its current selectors and a translation be performed by the AuthPolicy controller?
"},{"location":"architecture/rfcs/0002-well-known-attributes/#future-possibilities","title":"Future possibilities","text":" - Extend with more well-known attributes that abstract common patterns and/or for rather opinioned use cases. Examples:
auth.*
attributes supported in the rate limit service request.authenticated
request.operation.(read|write)
request.param.my-param
-
connection.secure
-
Other Envoy attributes
Wasm attributes Attribute
Type
Description
Auth
RL
wasm.plugin_name
String
Plugin name
\u2713
wasm.plugin_root_id
String
Plugin root ID
\u2713
wasm.plugin_vm_id
String
Plugin VM ID
\u2713
wasm.node
Node
Local node description
\u2713
wasm.cluster_name
String
Upstream cluster name
\u2713
wasm.cluster_metadata
Metadata
Upstream cluster metadata
\u2713
wasm.listener_direction
Number
Enumeration value of the listener traffic direction
\u2713
wasm.listener_metadata
Metadata
Listener metadata
\u2713
wasm.route_name
String
Route name
\u2713
wasm.route_metadata
Metadata
Route metadata
\u2713
wasm.upstream_host_metadata
Metadata
Upstream host metadata
\u2713
Proxy configuration attributes Attribute
Type
Description
Auth
RL
xds.cluster_name
String
Upstream cluster name
\u2713
xds.cluster_metadata
Metadata
Upstream cluster metadata
\u2713
xds.route_name
String
Route name
\u2713
xds.route_metadata
Metadata
Route metadata
\u2713
xds.upstream_host_metadata
Metadata
Upstream host metadata
\u2713
xds.filter_chain_name
String
Listener filter chain name
\u2713
- Add some support for value modifiers (functions), along the lines of Authorino's JSON path modifiers and/or Envoy attributes' path expressions.
"},{"location":"architecture/rfcs/0003-dns-policy/","title":"RFC Template","text":" - Feature Name: DNSPolicy
- Start Date: 2023-07-01
- RFC PR: Kuadrant/architecture#20
- Issue tracking: Kuadrant/multicluster-gateway-controller#219
- Labels: DNS, Load Balancing, Multi-Cluster
"},{"location":"architecture/rfcs/0003-dns-policy/#summary","title":"Summary","text":"Provide a policy for configuring how DNS should be handed for a given gateway. Provide a mechanism for enabling DNS based load balancing.
"},{"location":"architecture/rfcs/0003-dns-policy/#motivation","title":"Motivation","text":"Gateway admins, need a way to define the DNS policy for a multi-cluster gateway in order to control how much and which traffic reaches these gateways. Ideally we would allow them to express a strategy that they want to use without needing to get into the details of each provider and needing to create and maintain dns record structure and individual records for all the different gateways that may be within their infrastructure.
"},{"location":"architecture/rfcs/0003-dns-policy/#guide-level-explanation","title":"Guide-level explanation","text":"Allow definition of a DNSPolicy that configures load balancing to decide how traffic should be distributed across multiple gateway instances from the central control plane.
"},{"location":"architecture/rfcs/0003-dns-policy/#terms","title":"Terms","text":" - managed listener: This is a listener with a host backed by a DNS zone managed by the multi-cluster gateway controller
- hub cluster: control plane cluster that managed 1 or more spokes
- spoke cluster: a cluster managed by the hub control plane cluster. This is where gateway are instantiated
Provide a control plane DNSPolicy API that uses the idea of direct policy attachment from gateway API that allows a load balancing strategy to be applied to the DNS records structure for any managed listeners being served by the data plane instances of this gateway. The DNSPolicy also covers health checks that inform the DNS response but that is not covered in this document.
Below is a draft API for what we anticipate the DNSPolicy to look like
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n health:\n ...\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom: #optional\n\n - value: AWS #optional with both GEO and weighted. With GEO the custom weight is applied to gateways within a Geographic region\n weight: 10\n - value: GCP\n weight: 20\n GEO: #optional\n defaultGeo: IE # required with GEO. Chooses a default DNS response when no particular response is defined for a request from an unknown GEO.\n
"},{"location":"architecture/rfcs/0003-dns-policy/#available-load-balancing-strategies","title":"Available Load Balancing Strategies","text":"GEO and Weighted load balancing are well understood strategies and this API effectively allow a complex requirement to be expressed relatively simply and executed by the gateway controller in the chosen DNS provider. Our default policy will execute a \"Round Robin\" weighted strategy which reflects the current default behaviour.
With the above API we can provide weighted and GEO and weighted within a GEO. A weighted strategy with a minimum of a default weight is always required and the simplest type of policy. The multi-cluster gateway controller will set up a default policy when a gateway is discovered (shown below). This policy can be replaced or modified by the user. A weighted strategy can be complimented with a GEO strategy IE they can be used together in order to provide a GEO and weighted (within a GEO) load balancing. By defining a GEO section, you are indicating that you want to use a GEO based strategy (how this works is covered below).
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: default-policy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted: # required\n defaultWeight: 10 #required, all records created get this weight\n health:\n ... \n
In order to provide GEO based DNS and allow customisation of the weighting, we need some additional information to be provided by the gateway / cluster admin about where this gateway has been placed. For example if they want to use GEO based DNS as a strategy, we need to know what GEO identifier(s) to use for each record we create and a default GEO to use as a catch-all. Also, if the desired load balancing approach is to provide custom weighting and no longer simply use Round Robin, we will need a way to identify which records to apply that custom weighting to based on the clusters the gateway is placed on.
To solve this we will allow two new attributes to be added to the ManagedCluster
resource as labels:
kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
These two labels allow setting values in the DNSPolicy that will be reflected into DNS records for gateways placed on that cluster depending on the strategies used. (see the first DNSPolicy definition above to see how these values are used) or take a look at the examples at the bottom.
example :
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\nspec: \n
The attributes provide the key and value we need in order to understand how to define records for a given LB address based on the DNSPolicy targeting the gateway.
The kuadrant.io/lb-attribute-geo-code
attribute value is provider specific, using an invalid code will result in an error status condition in the DNSrecord resource.
"},{"location":"architecture/rfcs/0003-dns-policy/#dns-record-structure","title":"DNS Record Structure","text":"This is an advanced topic and so is broken out into its own proposal doc DNS Record Structure
"},{"location":"architecture/rfcs/0003-dns-policy/#custom-weighting","title":"Custom Weighting","text":"Custom weighting will use the associated custom-weight
attribute set on the ManagedCluster
to decide which records should get a specific weight. The value of this attribute is up to the end user.
example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
The above is then used in the DNSPolicy to set custom weights for the records associated with the target gateway.
- value: GCP\n weight: 20\n
So any gateway targeted by a DNSPolicy with the above definition that is placed on a ManagedCluster with the kuadrant.io/lb-attribute-custom-weight label set to a value of GCP will get an A record with a weight of 20.
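To make this concrete, below is a hedged sketch of the kind of weighted endpoint such a record could translate to; the endpoint structure follows the weightedGeo example in the Single Cluster DNSPolicy RFC later in this document, and the setIdentifier value is hypothetical:
endpoints:\n\n - dnsName: myapp.hcpapps.net\n recordTTL: 60\n recordType: A\n setIdentifier: cluster-1 # hypothetical identifier for the cluster the gateway is placed on\n providerSpecific:\n - name: weight\n value: \"20\"\n targets:\n - 172.31.200.0\n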
"},{"location":"architecture/rfcs/0003-dns-policy/#status","title":"Status","text":"DNSPolicy should have a ready condition that reflect that the DNSRecords have been created and configured as expected. In the case that there is an invalid policy, the status message should reflect this and indicate to the user that the old DNS has been preserved.
We will also want to add a status condition to the gateway status indicating it is affected by this policy. Gateway API recommends the following status condition:
- type: gateway.networking.k8s.io/PolicyAffected\n status: True \n message: \"DNSPolicy has been applied\"\n reason: PolicyApplied\n ...\n
https://github.com/kubernetes-sigs/gateway-api/pull/2128/files#diff-afe84021d0647e83f420f99f5d18b392abe5ec82d68f03156c7534de9f19a30aR888
"},{"location":"architecture/rfcs/0003-dns-policy/#example-policies","title":"Example Policies","text":""},{"location":"architecture/rfcs/0003-dns-policy/#round-robin-the-default-policy","title":"Round Robin (the default policy)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: RoundRobinPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n
"},{"location":"architecture/rfcs/0003-dns-policy/#geo-round-robin","title":"GEO (Round Robin)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNS\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n GEO:\n defaultGeo: IE\n
"},{"location":"architecture/rfcs/0003-dns-policy/#custom","title":"Custom","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: SendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure #any record associated with a gateway on a cluster without this value gets the default\n weight: 30\n
"},{"location":"architecture/rfcs/0003-dns-policy/#geo-with-custom-weights","title":"GEO with Custom Weights","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNSAndSendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure\n weight: 30\n GEO:\n defaultGeo: IE\n
"},{"location":"architecture/rfcs/0003-dns-policy/#reference-level-explanation","title":"Reference-level explanation","text":" - Add a DNSPolicy CRD that conforms to policy attachment spec
- Add a new DNSPolicy controller to MCG
- DNS logic and record management should all migrate out of the gateway controller into this new DNSPolicy controller as it is the responsibility and domain of the DNSPolicy controller to manage DNS
- remove the Hosts interface, as we do not want other controllers using it to bring DNS logic into other areas of the code.
"},{"location":"architecture/rfcs/0003-dns-policy/#drawbacks","title":"Drawbacks","text":"You cannot have a different load balancing strategy for each listener within a gateway. So in the following gateway definition
spec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n hostname: myapp.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS\n - allowedRoutes:\n namespaces:\n from: All\n hostname: other.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS \n
The DNS policy targeting this gateway will apply to both myapp.hcpapps.net and other.hcpapps.net.
However, there is still significant value even with this limitation. This limitation is something we will likely revisit in the future.
"},{"location":"architecture/rfcs/0003-dns-policy/#background-docs","title":"Background Docs","text":"DNS Provider Support
AWS DNS
Google DNS
Azure DNS
Direct Policy Attachment
"},{"location":"architecture/rfcs/0003-dns-policy/#rationale-and-alternatives","title":"Rationale and alternatives","text":"An alternative is to configure all of this yourself manually in a dns provider. This is can be a highly complex dns configuration that it would be easy to get wrong.
"},{"location":"architecture/rfcs/0004-policy-status/","title":"Policy Status","text":" - Feature Name:
policy_status_states
- Start Date: 2023-02-03
- RFC PR: Kuadrant/architecture#0009
- Issue tracking: Kuadrant/architecture#0038
"},{"location":"architecture/rfcs/0004-policy-status/#summary","title":"Summary","text":"This RFC proposes a new design for any Kuadrant Policy (RateLimitPolicy
, AuthPolicy
, etc..) status definition and transitions.
"},{"location":"architecture/rfcs/0004-policy-status/#motivation","title":"Motivation","text":"At the time being, the RateLimitPolicy
and AuthPolicy
status doesn't clearly and truthfully communicate the actual state of reconciliation and healthiness with its operator-managed services, i.e., the Rate Limit service (\"Limitador\") and the Auth service (\"Authorino\"), referred to as \"Kuadrant services\".
As a consequence, misleading information is shared causing unexpected errors and flawed assumptions.
The following are some issues reported in relation to the aforementioned problems:
- https://github.com/Kuadrant/kuadrant-operator/issues/87
- https://github.com/Kuadrant/kuadrant-operator/issues/96
- https://github.com/Kuadrant/kuadrant-operator/issues/140
"},{"location":"architecture/rfcs/0004-policy-status/#guide-level-explanation","title":"Guide-level explanation","text":"This design for setting the status of the Kuadrant policy CRs is divided in 2 stages, where each stage could be applied/developed in order and would reflect valuable and accurate information with different degrees of acuity.
The Policy CRD Status in the following diagrams are simplified as states, which in the Reference-level explanation will be translated to the actual Status Conditions.
"},{"location":"architecture/rfcs/0004-policy-status/#stage-1","title":"Stage 1","text":"State of the policy CR defined by: application, validation, and reconciliation of it
The main signalization at Stage 1 is about whether a policy CR has been Accepted
or not.
States rationale:
Accepted: This state is reached after the Validation and Reconciliation events have been successfully passed.
Invalid: When the Validation process encounters an error, this state will be set.
TargetNotFound: This state will be set when the Reconciliation process cannot find the resource referenced by the policy's targetRef.
Conflicted: This state will be set when the Reconciliation process detects a conflict with another policy.
Notes:
- States from the Stage 2 could be implemented as well, but only relying on Validation and Reconciliation events.
"},{"location":"architecture/rfcs/0004-policy-status/#stage-2","title":"Stage 2","text":"Final state of the policy CR defined by: health check with the Kuadrant services (post-reconciliation)
The Enforced type is introduced to capture the difference between a policy being reconciled and it being enforced at the service.
States rationale:
Enforced: After a successful response from the Service Probe, this state communicates that the policy is finally enforced.
PartiallyEnforced: This state will be set when the Reconciliation event encounters an overlap with other policies.
Overridden: This state will be set when the Reconciliation event invalidates the policy because another one takes precedence.
"},{"location":"architecture/rfcs/0004-policy-status/#reference-level-explanation","title":"Reference-level explanation","text":"In general, the new states and conditions align with GEP-713.
Besides the proposed Accepted
PolicyType, the Enforced
PolicyType would be added to reflect the final state of the policy, which means that the policy is showing the synced actual state of the Kuadrant services.
The missing Failed
PolicyType would be implicitly represented by the TargetNotFound
and Invalid
PolicyTypeReasons.
"},{"location":"architecture/rfcs/0004-policy-status/#conditions","title":"Conditions","text":"All conditions are top-level.
| Type | Status | Reason | Message |
|----------|--------|--------------------|------------------------------------------------------------------|
| Accepted | True | \"Accepted\" | \"KuadrantPolicy has been accepted\" |
| | False | \"Conflicted\" | \"KuadrantPolicy is conflicted by [policy-ns/policy-name], ...\" |
| | False | \"Invalid\" | \"KuadrantPolicy is invalid\" |
| | False | \"TargetNotFound\" | \"KuadrantPolicy target [resource-name] was not found\" |
| Enforced | True | \"Enforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"Unknown\" | \"KuadrantPolicy has encountered some issues\" |
| | False | \"Overridden\" | \"KuadrantPolicy is overridden by [policy-ns/policy-name], ...\" |
Messages corresponding to falsey statuses are required and should reflect the error that was encountered.
It's possible to have the Failed state as a top level condition too. In this case, it might be useful to consider a third \"Unknown\" status.
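For illustration, a policy CR that has been accepted and confirmed by the service probe might carry a status stanza like the following; this is a hedged sketch whose values simply instantiate the table above:
status:\n conditions:\n\n - type: Accepted\n status: \"True\"\n reason: Accepted\n message: KuadrantPolicy has been accepted\n - type: Enforced\n status: \"True\"\n reason: Enforced\n message: KuadrantPolicy has been successfully enforced\n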
"},{"location":"architecture/rfcs/0004-policy-status/#policy-ancestor-status","title":"Policy ancestor status","text":"The Status stanza of the policy CRs must implement Gateway API's PolicyAncestorStatus struct. This will provide broader consistency and improved discoverability of effective policies.
"},{"location":"architecture/rfcs/0004-policy-status/#implementation-detailsrequisites","title":"Implementation details/requisites","text":"Full implementation of Stage 2 states assumes reporting mechanisms in place, provided by the Kuadrant services, that allow tracing the state of the configurations applied on the services, back to the original policies, to infer the final state of the policy CRs (i.e. whether truly Enforced
or not.)
Without such, Stage 2 could be only partially achieved, by relying only on Reconciliation events.
"},{"location":"architecture/rfcs/0004-policy-status/#drawbacks","title":"Drawbacks","text":" - This proposal will require to change the code controllers assert the status
- Since the Status is part of the \"API\", won't be backwards compatible
- Documentation updating
- The implementation of the affected policies will create a fan-out problem, that might lead to updating many policy objects and apiserver load.
"},{"location":"architecture/rfcs/0004-policy-status/#rationale-and-alternatives","title":"Rationale and alternatives","text":"Another option was considered (previously referred to as \"Option 1\"). While valid, this alternative would not align with GEP-713, neither it would be as flexible as the final design proposed.
Details of the discarded alternative This alternative design would come in 3 stages: **Stage 1: State of the policy CR defined by: application and validation of it** This first stage is a simple version where the operator only relies on itself, not checking the healthiness with the Kuadrant services, but just validating the Spec. ![](0004-policy-status-assets/policy_status_1.png) States rationale: * `Created`: The initial state. It announces that the policy has successfully been created; the operator acknowledges it. * `Applied`: This state is reached after the `Validation` event has been successfully passed. * `Failed`: This one would be set when the `Validation` process encounters an error. This could be either a condition's failed/error state or a top-level condition. * `Updated`: From `Failed` or `Applied`, a `Spec Change` event could be triggered that would move it to this state. **Stage 2: Further reconciliation check provides a new state** This one, besides checking what the former stage does, also adds the states reflecting the reconciliation process of any needed Kubernetes objects, Kuadrant Services custom resources and any other 3rd party CRs required. An example, in the case of the RLP, would be creating/updating the `ConfigMap` holding the `Limitador` config file. ![](0004-policy-status-assets/policy_status_2.png) States rationale: * `Applied`: The __Applied__ state would not be final, and would precede a `Reconciliation` event. * `Reconciled`: It communicates that the policy has successfully been reconciled, and any K8s object or required CR has been updated. * `Failed`: This one would be reached when either the `Validation` or `Reconciliation` process encounters any errors. **Stage 3: Final state of the policy CR defined by: health check with the Kuadrant services (post-reconciliation)** The final stage would bring a greater degree of accuracy, thanks to a final process that would check the healthiness and configuration version the Kuadrant services currently enforce. ![](0004-policy-status-assets/policy_status_3.png) States rationale: * `Reconciled`: This state would precede the \"Health check\" process graphed as the `Service Probe` event. * `Enforced`: After a successful response from the `Service Probe`, this state communicates that the policy is finally enforced. This is the final top-level condition. * `Failed`: Now this state could also be set after encountering errors in the `Service Probe` check. The stages mentioned above would follow the [Kubernetes guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) regarding the Status object definition. **Conditions** All conditions are top-level.
| Type | Status | Reason | Message |
|-------------|--------|-----------------------------|------------------------------------------------------------------------------|
| Progressing | True | \"PolicyCreated\" | \"KuadrantPolicy created\" |
| | True | \"PolicyUpdated\" | \"KuadrantPolicy has been updated\" |
| | True | \"PolicyApplied\" | \"KuadrantPolicy has been successfully applied\" |
| | True | \"PolicyReconciled\" | \"KuadrantPolicy has been successfully reconciled\" |
| | False | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"PolicyError\" | \"KuadrantPolicy has encountered an error\" |
| Enforced | True | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"PolicyPartiallyEnforced\" | \"KuadrantPolicy has encountered some issues and has been partially applied\" |
| | False | \"PolicyOverridden\" | \"KuadrantPolicy is overridden by [policy-ns/policy-name]\" |
| Failed | True | \"PolicyValidationError\" | \"KuadrantPolicy has failed to validate\" |
| | True | \"PolicyServiceError\" | \"KuadrantPolicy has failed to enforce\" |
| | False | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |"},{"location":"architecture/rfcs/0004-policy-status/#prior-art","title":"Prior art","text":" - Kubernetes API Conventions
- Current KuadrantPolicy Status work
"},{"location":"architecture/rfcs/0004-policy-status/#unresolved-questions","title":"Unresolved questions","text":" - Is it worthy to implement a state machine or state machine design pattern to achieve this set of conditions?
"},{"location":"architecture/rfcs/0004-policy-status/#future-possibilities","title":"Future possibilities","text":"The implementation of this proposal could be part of kuadrant/gateway-api-machinery.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/","title":"Single Cluster DNSPolicy","text":" - Feature Name:
single-cluster-dnspolicy
- Start Date: 2023-10-09
- RFC PR: Kuadrant/architecture#30
- Issue tracking:
- Kuadrant/architecture#31
- Kuadrant/architecture#67
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#summary","title":"Summary","text":"Proposal for changes to the DNSPolicy
API to allow it to provide a simple routing strategy as an option in a single cluster context. This will remove, but not negate, the complex DNS structure we use in a multi-cluster environment and, in doing so, allow the use of popular DNS integrators such as external-dns.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#motivation","title":"Motivation","text":"The DNSPolicy
API (v1alpha1) was implemented as part of our multi-cluster gateway offering using OCM, and as such the design and implementation were influenced heavily by how we want multi-cluster DNS to work. The goals of this proposal are to:
- Decouple the API entirely from OCM and multi cluster specific concepts.
- Simplify the DNS record structure created for a gateway listeners host for single cluster use.
- Improve the likelihood of adoption by creating an integration path for other kubernetes dns controllers such as external-dns.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#guide-level-explanation","title":"Guide-level explanation","text":"The DNSPolicy can be used to target a Gateway in a single cluster context and will create dns records for each listener host in an appropriately configured external dns provider. In this context the advanced loadbalancing
configuration is unnecessary, and the resulting DNSRecord can be created mapping individual listener hosts to a single DNS A or CNAME record by using the simple
routing strategy in the DNSPolicy.
Example 1. DNSPolicy using simple
routing strategy
apiVersion: kuadrant.io/v1alpha2\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\n
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"myapp.mn.hcpapps.net\"\n port: 80\n protocol: HTTP\nstatus:\n addresses:\n - type: IPAddress\n value: 172.31.200.0\n
In the example the api
listener has a hostname myapp.mn.hcpapps.net
that matches a hosted zone being managed by the provider referenced by my-route53-credentials in the DNSPolicy. As the simple routing strategy is set in the DNSPolicy, a DNSRecord resource with the following contents will be created:
apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nmetadata:\n name: prod-web-api\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
The providerRef
is included in the DNSRecord to allow the dns record controller to load the appropriate provider configuration during reconciliation and create the DNS records in the dns provider service e.g. route 53.
Example 2. DNSPolicy using simple
routing strategy on multi cluster gateway
apiVersion: kuadrant.io/v1alpha2\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\n
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"myapp.mn.hcpapps.net\"\n port: 80\n protocol: HTTP\nstatus:\n addresses:\n - type: kuadrant.io/MultiClusterIPAddress\n value: 172.31.200.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: 172.31.201.0\n
Similar to example 1, except here the Gateway is a multi cluster gateway that has had its status updated by the Gateway
controller to include kuadrant.io/MultiClusterIPAddress
type addresses. As the simple
routing strategy is set in the DNSPolicy, a DNSRecord resource with the following contents will be created:
apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nmetadata:\n name: prod-web-api\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n - 172.31.201.0\n
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#api-updates","title":"API Updates","text":"DNSPolicy:
- new providerRef field
spec.providerRef
- new routingStrategy field
spec.routingStrategy
- new api version
v1alpha2
DNSRecord:
spec.managedZone
replaced with spec.providerRef
- new zoneID field
spec.zoneID
- new api version
v1alpha2
ManagedZone:
- ManagedZone API wil be removed and no longer supported as part of MGC/Kuadrant.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnspolicyspecproviderref","title":"DNSPolicy.spec.providerRef","text":"The providerRef
field is mandatory and contains a reference to a secret containing provider credentials.
- `spec.providerRef.name` - name of the provider resource.\n
A DNSPolicy
referencing a providerRef secret will expect that secret to exist in the same namespace. The expected contents of the secrets data is comparable to the dnsProviderSecretRef
used by ManageZones.
apiVersion: v1\nkind: Secret\nmetadata:\n name: aws-credentials\ntype: kuadrant.io/aws\ndata:\n AWS_ACCESS_KEY_ID: \"foo\"\n AWS_SECRET_ACCESS_KEY: \"bar\"\n CONFIG:\n zoneIDFilter:\n\n - Z04114632NOABXYWH93QUl\n
The CONFIG
section of the secrets data will be added to allow provider specific configuration to be stored alongside the providers credentials and can be used during the instantiation of the provider client, and during any provider operations. The above for example would use the zoneIDFilter
value to limit what hosted zones this provider is allowed to update.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnspolicyspecroutingstrategysimpleweightedgeo","title":"DNSPolicy.spec.routingStrategy[simple|weightedGeo]","text":"The routingStrategy
field is mandatory and dictates what kind of dns record structure the policy will create. Two routing strategy options are allowed simple
or weightedGeo
.
A reconciliation of DNSPolicy processes the target gateway and creates a DNSRecord per listener that is supported by the currently configured provider(hostname matches the hosted zones accessible with the credentials and config). The routing strategy used will determine the contents of the DNSRecord resources Endpoints array.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#simple","title":"simple","text":"apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
Simple creates a single endpoint for an A record with multiple targets. Although intended for use in a single cluster context a simple routing strategy can still be used in a multi-cluster environment (OCM hub). In this scenario each clusters address will be added to the targets array to create a multi answer section in the dns response.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#weightedgeo","title":"weightedGeo","text":"apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: lb-4ej5le.myapp.mn.hcpapps.net\n providerSpecific:\n - name: geo-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: default.lb-4ej5le.myapp.mn.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n targets:\n - lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
WeightedGeo creates a more complex set of endpoints which use a combination of weighted and geo routing strategies. Although intended for use in a multi-cluster environment (OCM hub) it will still be possible to use it in a single cluster context. In this scenario the record structure described above would be created for the single cluster.
This is the current default for DNSPolicy in a multi-cluster environment (OCM hub) and more details about it can be found in the original DNSPolicy rfc.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnsrecordspecproviderref","title":"DNSRecord.spec.providerRef","text":"More details of providerRef
found in DNSPolicy.spec.providerRef
The DNSRecord API is updated to remove the managedZone
reference in favour of directly referencing the providerRef
credentials instead. The DNSRecord reconciliation will be unchanged except for loading the provider client from providerRef
credentials.
The DNSPolicy reconciliation will be updated to remove the requirement for a ManagedZone resource to be created before a DNSPolicy can create dns records for it, instead it will be replaced in favour of just listing available zones directly in the currently configured dns provider. If no matching zone is found, no DNSRecord will be created.
There is a potential for a DNSRecord to be created successfully, but then a provider updated to remove access. In this case it is the responsibility of the DNSPolicy controller to report appropriate status back to the policy and target resource about the failure to process the record. More details on how status will be reported can be found in rfc-0004
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnsrecordspeczoneid","title":"DNSRecord.spec.zoneID","text":"The zoneID
field is mandatory and contains the provider specific id of the hosted zone that this record should be published into.
The DNSRecord reconciliation will use this zone when creating/updating or deleting endpoints for this record set.
The zoneID
should not change after being selected during initial creation and as such will be marked as immutable.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#prior-art","title":"Prior art","text":"ExternalDNS
- Uses annotations on the target Gateway as opposed to a proper API.
- Requires access to the HTTP route resources.
- Supports only a single provider per external dns instance.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#unresolved-questions","title":"Unresolved questions","text":"When a provider is configured using a kind not supported by the DNSPolicy
controller e.g. ExternalDNS
we will be relying on an external controller to correctly update the status of any DNSRecord resources created by our policy. This may have a negative impact on our ability to correctly report status back to the target resource.
When using a weightedGeo routing strategy in a single cluster context it is not expected that this will offer multi cluster capabilities without the use of OCM. Currently, it is expected that if you want to create a recordset that contains the addresses of multiple clusters you must use an OCM hub.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#future-possibilities","title":"Future possibilities","text":"The ability to support other kubernetes dns controllers such as ExternalDNS would potentially allow us to contribute to some of these projects in the area of polices for dns management of Gateway resources in kubernetes.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/","title":"Configuration of Kuadrant Sub Components","text":" - Feature Name:
sub-components-config
- Start Date: 2023-09-11
- RFC PR: Kuadrant/architecture#25
- Issue tracking: Kuadrant/kuadrant-operator#163
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#summary","title":"Summary","text":"Enable configuration of sub components of Kuadrant from a centralized location, namely the Kuadrant CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#motivation","title":"Motivation","text":"The initial request comes from MGC to configure Redis for Limitador by the following issue #163. MGC's current work around is to update the Limitador CR after the deployment with the configuration setting for Redis Instance. This change would allow for the configuration of sub components before the Kuadrant is deployed.
This reduces the number of CRs that users of Kuadrant are required to modify to get the installation they require. The sub components CRs (Authorino, Limitador) never have to be modified by a Kuadrant user (and should never be modified by a Kuadrant User).
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#guide-level-explanation","title":"Guide-level explanation","text":"As the Kuadrant operator would be responsible for reconciling these configurations into the requested components, restrictions and limitations can be placed on the components which maybe allowed in a standalone installation. An example in this space is the disk storage for Limitador which is a new feature and the Kuadrant installation may not want to support it till there is a proven track record for the feature.
For existing Kuadrant Users this may be a possible breaking changes if those users manually configure the Kuadrant sub components via their CRs. A guide can be created to help migrate the users configurations to the Kuadrant CR. This guide can be part of the release notes and/or possibly released before the release of Kuadrant.
The deployment configuration for each component can be placed in the Kuadrant CR. These configurations are then reconciled into the CRs for each component. Only the options below are exposed in the Kuadrant CR. All fields in the spec are optional.
apiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant-sample\nspec:\n limitador:\n afffinity: ...\n listener: ...\n pdb: ...\n replicas: ...\n resourceRequirements: ...\n storage: ...\n authorino:\n evaluatorCacheSize: ...\n healthz: ...\n listener: ...\n logLevel: ...\n metrics: ...\n oidcServer: ...\n replicas: ...\n tracing: ...\n volumes: ...\nstatus:\n ...\n
The Kuadrant operator will watch for changes in the Authorino and Limitador CRs, reconciling back any changes that a user may do to these configurations. How ever Kuadrant operator will not reconcile fields that are given above. An example of this is the image
field on the Authorino CR. This field allows a user to set the image that Authorino is deployed with. The feature is meant for dev and testing purposes. If a user wishes to use a different image, they can. Kuadrant assumes they know what they are doing but requires the user to set the change on the component directly.
Only the sub component operator will be responsible for actioning the configurations pasted from the Kuadrant CR to the sub components CR. This ensure no extra changes will be required in the sub operators to meet the needs of Kuadrant.
Status errors related to the configuration of the sub components should be reported back to the Kuadrant CR. The errors messages in Kuadrant state what components are currently having issue and which resource to review for more details.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#reference-level-explanation","title":"Reference-level explanation","text":"All the fields in the Authorino and Limitador CRs that are configurable in the Kuadrant CR are optional and have sound defaults. Kuadrant needs to remain installable with out having to set any spec in the Kuadrant CR.
The Kuadrant operator should only reconcile the spec that is given. This would mean if the user states the number of replicas to be used in one of the components only the replica field for that component should be reconciled. As the other fields would be blank at this stage, blank fields would not be reconciled to the component CR. By this behaviour a few things are being achieved. Component controllers define the defaults to be used in the components. Optional fields in the component CRs never get set with blank values. Blank values in the component CR could override the defaults of the components causing unexpected behaviour. Existing Kuadrant users may already have custom fields set in the component CRs. By only reconciling the fields set in the kuadrant CR this allows time for a user to migrate their custom configuration from the component CR to the Kuadrant CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#fields-to-reconcile","title":"Fields to reconcile","text":"Fields being reconcile can be classified into different groups. These classifications are based around the tasks a user is achieve.
- Kubernetes native, setting that affect how Kubernetes handles the resource.
- Observability, configuration settings that allow insights into how the applications are operation. This can be Kubernetes native or external tooling.
- Application Settings, setting targeting the application and how it connects to external services.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#authorino-spec","title":"Authorino Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#kubernetes-native","title":"Kubernetes native","text":" - replicas
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#observability","title":"Observability","text":" - healthz
- logLevel
- metrics
- tracing
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#application-settings","title":"Application Settings","text":" - evaluatorCacheSize
- listener
- oidcServer
- volumes
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#limitador-spec","title":"Limitador Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#kubernetes-native_1","title":"Kubernetes native","text":" - afffinity
- pdb
- replicas
- resourceRequirements
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#application-settings_1","title":"Application Settings","text":" - listener
- storage
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#fields-not-reconciled","title":"Fields not reconciled","text":"There are a number of fields in both Authorino and Limitador that are not reconciled. Reasons for doing this are:
It is better to start with a sub set of features and expand to include more at a later date. Removing feature support is far harder than adding it.
There are four classifications the unreconciled fields fail into.
- Deprecated, fields that are deprecated and/or have plans to be removed from the spec in the future.
- Unsupported, the features would have hard coded or expected defaults in the Kuadrant operator. Work would be required to all the custom configurations options.
- Dev/Testing focused, features that should only be used during development & testing and not recommended for production. The defaults would for the fields are the production recommendations.
- Reconciled by others, this mostly affects Limitador as the deployment configuration and runtime configuration are in the same CR. In the case of Kuadrant the runtime configuration for Limitador is added via the RateLimitingPolicy CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#authorino-spec_1","title":"Authorino Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unsupported","title":"Unsupported","text":" - clusterWide
- authConfigLabelSelectors
- secretLabelSelectors
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#devtesting-focused","title":"Dev/Testing focused","text":" - image
- imagePullPolicy
- logMode
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#limitador-spec_1","title":"Limitador Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unsupported_1","title":"Unsupported","text":" - RateLimitHeaders
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#reconciled-by-others","title":"Reconciled by others","text":" - Limits
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#deprecated","title":"Deprecated","text":" - version
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#drawbacks","title":"Drawbacks","text":"As the Kuadrant CR spec will be a sub set of the features that can be configured in the sub components spec, extra maintenances will be required to ensure specs are in sync.
New features of a component will not be accessible in Kuadrant initially. This is both a pro and a con.
Documentation becomes harder, as the sub component should be documenting their own features but in Kuadrant the user does not configure the feature in sub component. This has the risk of confusing new users.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#rationale-and-alternatives","title":"Rationale and alternatives","text":" - Why is this design the best in the space of possible designs?
- What other designs have been considered and what is the rationale for not choosing them?
- What is the impact of not doing this?
One alternative that was being looked at was allowing the user to bring their own Limitador instances by stating which Limitador CR Kuadrant should use. A major point of issue with this approach was knowing what limits the user had configured and what limits Kuadrant configured. Sharing global counters is a valid reason to want to share Limitador instances. How ever it this case Limitador would not be using one replica and therefore would have a back-end storage configured. It is the back-end storage that needs to be shared across instances. This can be done with adding the configuration in the Kuadrant CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#prior-art","title":"Prior art","text":"Discuss prior art, both the good and the bad, in relation to this proposal. A few examples of what this can include are:
- Does another project have a similar feature?
- What can be learned from it? What's good? What's less optimal?
- Papers: Are there any published papers or great posts that discuss this? If you have some relevant papers to refer to, this can serve as a more detailed theoretical background.
This section is intended to encourage you as an author to think about the lessons from other tentatives - successful or not, provide readers of your RFC with a fuller picture.
Note that while precedent set by other projects is some motivation, it does not on its own motivate an RFC.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unresolved-questions","title":"Unresolved questions","text":" - What parts of the design do you expect to resolve through the RFC process before this gets merged?
- What parts of the design do you expect to resolve through the implementation of this feature before stabilization?
- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC?
- Is there a need to add validation on the configuration?
- If a valid configuration is add to the Kuadrant CR and this configuration is pass to the sub components CR but there is a error trying to setting up the configuration. How is this error reported back to the user? An example of this is configuring Redis as the back-end in Limitador, this requires stating the name and namespace of a configmap. The Limitador CR will have an error if the configmap does not exist and as the user only configures the Kuadrant CR this error may go unnoticed. This is only one example but there is a need for good error reporting back to the user, where they would expect to see the error.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#future-possibilities","title":"Future possibilities","text":"Think about what the natural extension and evolution of your proposal would be and how it would affect the platform and project as a whole. Try to use this section as a tool to further consider all possible interactions with the project and its components in your proposal. Also consider how this all fits into the roadmap for the project and of the relevant sub-team.
This is also a good place to \"dump ideas\", if they are out of scope for the RFC you are writing but otherwise related.
Note that having something written down in the future-possibilities section is not a reason to accept the current or a future RFC; such notes should be in the section on motivation or rationale in this or subsequent RFCs. The section merely provides additional information.
The implementation stated here allows the user to state spec fields in the component CRs or the Kuadrant CR (Kuadrant CR overrides the component CRs). A future possibility would be to warn the user if they add configuration to the components CRs that would get overridden if the same spec fields are configured in the Kuadrant CR.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/","title":"RFC - Policy Sync","text":" - Feature Name:
policy_sync_v1
- Start Date: 2023-10-10
- RFC PR: Kuadrant/architecture#0000
- Issue tracking: https://github.com/Kuadrant/architecture/issues/26
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#summary","title":"Summary","text":"The ability for the Multicluster Gateway Controller to sync policies defined in the hub cluster downstream to the spoke clusters, therefore allowing all policies to be defined in the same place. These policies will be reconciled by the downstream policy controller(s).
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#nomenclature","title":"Nomenclature","text":" -
Policy: When refering to a Policy, this document is refering to a Gateway API policy as defined in the Policy Attachment Model. The Multicluster Gateway Controller relies on OCM as a Multicluster solution, which defines its own unrelated set of Policies and Policy Framework. Unless explicitely mentioned, this document refers to Policies as Gateway API Policies.
-
Policy overriding: The concept of policy overriding is mentioned in this document. It refers to the proposed ability of the downstream Gateway implementation to prioritise downstream Policies against synced Policies in case of conflicts.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#motivation","title":"Motivation","text":"Currently, Kuadrant's support for the Policy Attachment Model can be divided in two categories:
- Policies targeting the Multicluster Gateway, defined in the hub cluster and reconciled by the Multicluster Gateway Controller
- Policies targeting the downstream Gateway, defined in the spoke clusters and reconciled by the downstream Gateway controllers.
In a realistic multicluster scenario where multiple spoke clusters are present, the management of these policies can become tedious and error-prone, as policies have to be defined in the hub cluster, as well as replicated in the multiple spoke clusters.
As Kuadrant users:
- Gateway-admin has a set of homogeneous clusters and needs to apply per cluster rate limits across the entire set.
- Platform-admin with a set of clusters with rate limits applied needs to change rate limit for one particular cluster.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#guide-level-explanation","title":"Guide-level explanation","text":"The policy sync feature will allow a gateway-admin to configure, via GatewayClass parameters, a set of Policy GVRs to be synced by the Multicluster Gateway Controller.
The policiesToSync
field in the parameters defines those GVRs. For example, in order to configure the controller to sync AuthPolicies:
\"policiesToSync\": [\n {\n \"group\": \"kuadrant.io\",\n \"version\": \"v1beta1\",\n \"resource\": \"authpolicies\" \n }\n]\n
The support for resources that the controller can sync is limited by the following:
- The controller ServiceAccount must have permission to watch, list, and get the resource to be synced
- The resource must implement the Policy schema:
- Have a
.spec.targetRef
field
When a Policy is configured to be synced in a GatewayClass, the Multicluster Gateway Controller starts watching events on the resources, and propagates changes by placing the policy in the spoke clusters, with the following mutations:
- The
TargetRef
of the policy is changed to reference the downstream Gateway - The
kuadrant.io/policy-synced
annotation is set
The upstream policy is annotated with a reference to the name and namespace of the downstream policies:
annotations:\n \"kuadrant.io/policies-synced\": \"[{\\\"cluster\\\": \\\"...\\\", \\\"name\\\": \\\"...\\\", \\\"namespace\\\": \\\"...\\\"}]\"\n
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#process-overview","title":"Process overview","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#dynamic-policy-watches","title":"Dynamic Policy watches","text":"The Multicluster Gateway Controller reconciles parameters referenced by the GatewayClass of a Gateway. A new field is added to the parameters that allows the configuration of a set of GVRs of Policies to be synced.
The GatewayClass reconciler validates that:
- The GVRs reference existing resource definitions
- The GVRs reference resources that implement the Policy schema.
Validation failures are reported as part of the status of the GatewayClass
The Gateway reconciler sets up dynamic watches to react to events on the configured Policies, calling the PolicySyncer component with the updated Policy as well as the associated Gateway.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#policysyncer-component","title":"PolicySyncer component","text":"The PolicySyncer component is in charge of reconciling Policy watch events to apply the necessary changes and place the Policies in the spoke clusters.
This component is injected in the event source and called when a change is made to a hub Policy that has been configured to be synced.
The PolicySyncer implementation uses OCM ManifestWorks to place the policies in the spoke clusters. Through the ManifestWorks, OCM allows to:
- Place the Policy in each spoke cluster
- Report the desired status back to the hub using JSON feedback rules
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#policy-hierarchy","title":"Policy Hierarchy","text":"In order to avoid conflict with Policies created directly in the spoke clusters, a hierarchy must be defined to prioritise those Policies.
The controller will set the kuadrant.io/policy-synced
annotation on the policy when placing it in the spoke cluster.
The Kuadrant operator will be aware of the presence of this annotation, and, in case of conflicts, override Policies that contain this annotation. When a policy is overriden due to conflicts, the Enforced
status will be set to False
, with the reason being Overriden
and a human readable message explaining the reason why the policy was overriden. See Policy Status RFC
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#drawbacks","title":"Drawbacks","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#third-party-policy-support","title":"Third party Policy support","text":"In order for a Policy to be supported for syncing, the MGC must have permissions to watch/list/get the resource, and the implementation of the downstream Gateway controller must be aware of the policy-synced
annotation.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#alternatives","title":"Alternatives","text":"Different technology stacks are available to sync resources across clusters. However, adoption of these technologies for the purpose of the goal this RFC intends to achieve, implies adding another dependency to the current stack, with the cost of added complexity and maintainance effort.
The MGC currently uses OCM to place Gateways across clusters. Relying on OCM for the purpose of placing Policies is the most straightforward alternative from a design and implementation point of view.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#consequences-of-not-implementing","title":"Consequences of not implementing","text":"Gateway-admins will have no centralized system for handling spoke-level policies targeting a gateway created there from the hub.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#ocms-policy-framework-will-not-be-used-to-complete-this-objective","title":"OCMs Policy Framework will not be used to complete this objective:","text":"OCMs Policy Framework is a system designed to make assertions about the state of a spoke, and potentially take actions based on that state, as such it is not a suitable replacement for manifestworks in the case of syncing resources to a spoke.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#potential-migration-from-manifestworks-to-manifestworkreplicasets","title":"Potential migration from ManifestWorks to ManifestWorkReplicaSets","text":"ManifestWorkPeplicaSets may be a future improvement that the MGC could support to simplify the placement of related resources, but beyond the scope of this RFC.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#prior-art","title":"Prior art","text":"No applicable prior art.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#unresolved-questions","title":"Unresolved questions","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#status-reporting","title":"Status reporting","text":"While the controller can assume common status fields among the Policies that it syncs, there might be a scenario where certain policies use custom status fields that are not handled by the controller. In order to support this, two alternatives are identified:
-
Configurable rules.
An extra field is added in the GatewayClass params that configures the policies to sync, to specify custom fields that the controller must propagate back from the spokes to the hub.
-
Hard-coded support.
The PolicySync component can identify the Policy type and select which extra status fields are propagated
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#future-possibilities","title":"Future possibilities","text":"If OCMs Policy Framework is updated to enable syncing of resources status back to the hub, it could be an opportunity to refactor the MGC to use this framework in place of the current approach of creating ManifestWorks directly.
This system could mutate over time to dynamically sync more CRDs than policies to spoke clusters.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/","title":"Kuadrant Release Process","text":" - Feature Name:
kuadrant-release-process
- Start Date: 2024-01-11
- RFC PR: Kuadrant/architecture#46
- Issue tracking: Kuadrant/architecture#59
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#summary","title":"Summary","text":"Kuadrant is a set of components whose artifacts are built and delivered independently. This RFC aims to define every aspect of the event of releasing a new version of the whole, in terms of versioning, cadence, communication, channels, handover to other teams, etc.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#motivation","title":"Motivation","text":"At the time being, there's no clear process nor guidelines to follow when releasing a new version of Kuadrant, which leads to confusion and lack of transparency. We are currently relying on internal communication and certain people in charge of the release process, which is not ideal.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#guide-level-explanation","title":"Guide-level explanation","text":"First, we need to define what releasing Kuadrant means, in a clear and transparent way that communicates to the community what's happening and what to expect. The Kuadrant suite is composed of several components, each of them with its own set of artifacts and versioning scheme. Defining the release process of the whole suite is a complex task, and it's not only about the technical details of releasing the components, but also about the communication and transparency with the community, the definition of the frequency of the releases, and when it's ready to be handover to other teams like QA. This section aims to provide guidelines for the different aspects of the release process.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#components-and-versioning","title":"Components and versioning","text":"The set of components that are part of the Kuadrant suite are the following:
- Authorino: Kubernetes-native authorization service for tailor-made Zero Trust API security.
- Authorino Operator: A Kubernetes Operator to manage Authorino instances.
- Limitador: A generic rate-limiter written in Rust.
- Limitador Operator: A Kubernetes Operator to manage Limitador deployments.
- Wasm Shim: A Proxy-Wasm module written in Rust, acting as a shim between Envoy and Limitador.
- Multicluster Gateway Controller: Provides multi-cluster connectivity and global load balancing.
- DNS Operator: A Kubernetes Operator to manage DNS in single and multi-cluster environments.
- Kuadrant Operator: The Operator to install and manage the lifecycle of the Kuadrant components deployments. Example alerts and dashboards are also included as optional.
- kuadrantctl: A CLI tool for managing Kuadrant configurations and resources.
Each of them needs to be versioned independently, and the versioning scheme should follow Semantic Versioning. At the time of cutting a release for any of them, it's important to keep in mind what section of the version to bump, given a version number MAJOR.MINOR.PATCH, increment the:
- MAJOR version when you make incompatible API changes
- MINOR version when you add functionality in a backward compatible manner
- PATCH version when you make backward compatible bug fixes
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
A more detailed explanation of the versioning scheme can be found in the Semantic Versioning website.
By releasing a new version of Kuadrant, we mean releasing the set of components with their corresponding semantic versioning, some of them maybe freshly released, or others still using versioning from the previous one, and being the version of the Kuadrant Operator the one that defines the version of the whole suite.
Kuadrant Suite vx.y.z = Kuadrant Operator vx.y.z + Authorino Operator va.b.c + Limitador Operator vd.e.f + DNS Operator vg.h.i + MGC Controller vj.k.l + Wasm Shim vm.n.o\n
The technical details of how to release each component are out of the scope of this RFC and could be found in the Kuadrant components CI/CD RFC.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#qa-sanity-check","title":"QA Sanity Check","text":"Probably the most important and currently missing step in the release process is the green flagging from the Quality Assurance (QA) team. The QA team is responsible for testing the different components of the Kuadrant suite, and they need to be aware of the new version of the suite that is going to be released, what are the changes that are included, bug fixes and new features in order they can plan their testing processes accordingly. This check is not meant to be a fully fledged assessment from the QA team when it's handover to them, it's aimed to not take more than 1-2 days, and ideally expected to be fully automated. This step will happen once the release candidate has no PRs pending to be merged, and it has been tested by the Engineering team. The QA team should work closely to the engineering throughout the process, both teams aiming for zero handover time and continuous delivery mindset, so immediate testing can be triggered on release candidates once handed over. This process should happen without the need of formal communication between the teams or any overhead in general, but by keeping constant synergy between quality and product engineering instead.
There is an ideal time to hand over to the QA team for testing. Especially since we are using GitHub for orchestration, it can be briefly defined in the following steps:
- Complete Development Work: The engineering team completes their work included in the milestone.
- Create Release Candidate: The engineering team creates Release Candidate builds and manifests for all components required for the release.
- Flagging/Testing: The QA team does the actual assertion/testing of the release candidate, checking for any obvious bugs or issues. QA then reports all the bugs as GitHub issues and communicates testing status back publicly on Slack and/or email.
- Iterate: Based on the feedback from the QA team, the Engineering team makes any necessary adjustments and repeats the process until the release candidate is deemed ready for production.
- Publish Release: Once QA communicates that the testing has been successfully finished, the engineering team will publish the release both on GitHub and in the corresponding registries, update documentation for the new release, and communicate it to all channels specified in the Communication section.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#cadence","title":"Cadence","text":"Once the project is stable enough, and its adoption increases, the community will be expecting a certain degree of commitment from the maintainers, and that includes a regular release cadence. The frequency of the releases of the different components could vary depending on the particular component needs. However, the Kuadrant Operator it's been discussed in the past that it should be released every 3-4 weeks initially, including the latest released version of every component in the suite. There's another RFC that focuses on the actual frequency of each component, one could refer to the Kuadrant Release Cadence RFC.
There are a few reasons for this:
- Delivering Value to Users: Regular releases provide users with a steady stream of updates and improvements, including new features and essential bug fixes, thus enhancing the overall value delivered to the users.
- Resource Optimization: By releasing software at regular intervals, teams can align their activities with available resources and environments, ensuring optimal utilization. This leads to increased efficiency in the deployment process and reduces the risk of resource wastage.
- Risk Management: Regular releases can help identify and fix issues early, reducing the risk of major failures that could affect users.
- Feedback Cycle: Regular releases allow for quicker feedback cycles. This means that any issues or improvements identified by users can be addressed promptly, leading to a more refined product over time.
- Synchronization: Regular releases can help synchronize work across different teams or departments, creating a more reliable and predictable development and delivery process.
- Reduced Complexity: Managing a smaller number of releases can reduce complexity. For example, having many different releases out in the field can lead to confusion and management overhead.
By committing to a release cadence, software projects can benefit from improved efficiency, risk management, faster feedback cycles, synchronization, and reduced complexity.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#repositories-and-hubs","title":"Repositories and Hubs","text":"Every component in Kuadrant has its own repository, and the source code is hosted in GitHub, mentioned in the previous section. However, the images built and manifests generated are hosted in different registries, depending on the component. The following table shows the different registries used by each component:
Component artifacts and registries / hubs:
- Authorino: authorino images (Quay.io)
- Authorino Operator: authorino-operator images (Quay.io); authorino-operator-bundle images (Quay.io); authorino-operator-catalog images (Quay.io); authorino-operator manifests (OperatorHub.io)
- Limitador: limitador server images (Quay.io); limitador crate (Crates.io)
- Limitador Operator: limitador-operator images (Quay.io); limitador-operator-bundle images (Quay.io); limitador-operator-catalog images (Quay.io); limitador-operator manifests (OperatorHub.io)
- Wasm Shim: wasm-shim images (Quay.io)
- Multicluster Gateway Controller: multicluster-gateway-controller images (Quay.io); multicluster-gateway-controller-bundle images (Quay.io); multicluster-gateway-controller-catalog images (Quay.io)
- DNS Operator: dns-operator images (Quay.io); dns-operator-bundle images (Quay.io); dns-operator-catalog images (Quay.io)
- Kuadrant Operator: kuadrant-operator images (Quay.io); kuadrant-operator-bundle images (Quay.io); kuadrant-operator-catalog images (Quay.io); kuadrant-operator manifests (OperatorHub.io); kuadrant-operator source, including example dashboards and alerts (Github Releases)
- kuadrantctl: kuadrantctl CLI (Github Releases)"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#documentation","title":"Documentation","text":"It's important to note that keeping the documentation up to date is a responsibility of the component maintainers, and it needs to be done before releasing a new version of the component. Keeping clear and up-to-date documentation is crucial for the success of the project.
The documentation for the Kuadrant suite is compiled and available on the Kuadrant website. One can find the source of the documentation within each component repository, in the docs directory. However, making this information available on the website is a manual process that should be done by the maintainers of the project. The process of updating the documentation is simple and consists of the following steps:
- Update the documentation in the corresponding component repository.
- Follow the instructions in https://github.com/Kuadrant/docs.kuadrant.io/ to update the Docs pointers to the tag or branch of the component repository that contains the updated documentation.
- Once the changes are merged to main, the workflow that updates the website will be triggered, and the documentation will be updated.
- If for some reason the workflow needs to be triggered manually, one can do it from the GitHub Actions tab in the docs.kuadrant.io repository (Actions > ci > Run Workflow).
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#communication","title":"Communication","text":"Another important aspect of releasing a new version of the Kuadrant suite is the communication with the community and other teams within the organization. A few examples of the communication channels that need to be updated are:
- Changelog generation
- Release notes
- Github Release publication
- Slack channel in the Kubernetes workspace
- Blog post, if applicable
- Social media, if applicable
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#rationale-and-alternatives","title":"Rationale and alternatives","text":"The alternative to the proposal is to keep the current process, which is not ideal and leads to confusion and lack of transparency.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#prior-art","title":"Prior art","text":"There's been an organically grown process for releasing new versions of the Kuadrant suite, which is not documented and it's been changing over time. However, there are some documentation for some of the components, worth mentioning:
- Authorino release process
- Authorino Operator release process
- Limitador release process
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#unresolved-questions","title":"Unresolved questions","text":" - What would be Kuadrant support policy?
- How many versions are we going to back-port security and bug fixes to?
- What other teams need to be involved in the release process?
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#future-possibilities","title":"Future possibilities","text":"Once the release process is accepted and battle-tested, we could aim to automate the process as much as possible.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/","title":"Defaults & Overrides","text":" - Feature Name:
defaults-and-overrides
- Start Date: 2024-02-15
- RFC PR: Kuadrant/architecture#58
- Issue tracking: Kuadrant/kuadrant-operator#431
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#summary","title":"Summary","text":"This is a proposal for extending the Kuadrant Policy APIs to fully support use cases of Defaults & Overrides (D/O) for Inherited Policies, including the base use cases of full default and full override, and more specific nuances that involve merging individual policy rules (as defaults or overrides), declaring constraints and unsetting defaults.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#motivation","title":"Motivation","text":"As of Kuadrant Operator v0.6.0, Kuadrant policy resources that have hierarchical effect across the tree of network objects (Gateway, HTTPRoute), or what is known as Inherited Policies, provide only limited support for setting defaults and no support for overrides at all.
The above is notably the case of the AuthPolicy and the RateLimitPolicy v1beta2 APIs, shipped with the aforementioned version of Kuadrant. These kinds of policies can be attached to Gateways or to HTTPRoutes, with cascading effects through the hierarchy that result in one effective policy per gateway-route combination. This effective policy is either the policy attached to the Gateway or, if present, the one attached to the HTTPRoute, thus conforming with a strict case of implicit defaults set at the level of the gateway.
Enhancing the Kuadrant Inherited Policy CRDs, so the corresponding policy instances can declare defaults and overrides stanzas, is imperative:
- to provide full support for D/O along the lines proposed by GEP-713 (to be superseded by GEP-2649) of the Kubernetes Gateway API special interest group (base use cases);
- to extend D/O support to other derivative cases, learnt to be just as important for platform engineers and app developers who require more granular policy interaction on top of the base cases;
- to support more sophisticated hierarchies with other kinds of network objects and/or multiple policies targeting the same level of the hierarchy (possibly, in the future.)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#guide-level-explanation","title":"Guide-level explanation","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#conceptualization-and-user-story","title":"Conceptualization and User story","text":"The base use cases for Defaults & Overrides (D/O) are:
- Defaults (D): policies declared lower in the hierarchy supersede ones set (as \"defaults\") at a higher level, or \"more specific beats less specific\"
- Overrides (O): policies declared higher in the hierarchy (as \"overrides\") supersede ones set at the lower levels, or \"less specific beats more specific\"
The base cases are expanded with the following additional derivative cases and concepts:
- Merged defaults (DR): \"higher\" default policy rules that are merged into more specific \"lower\" policies (as opposed to an atomic less specific set of rules that is activated only when another more specific one is absent)
- Merged overrides (OR): \"higher\" override policy rules that are merged into more specific \"lower\" policies (as opposed to an atomic less specific set of rules that is activated fully replacing another more specific one that is present)
- Constraints (C): specialization of an override that, rather than declaring concrete values, specifies higher level constraints (e.g., min value, max value, enums) for lower level values, with the semantics of \"clipping\" lower level values so they are enforced, in an override fashion, to stay within the boundaries dictated by the constraints; typically employed for constraining numeric values and regular patterns (e.g. limited sets)
- Unsetting (U): specialization that completes a merge default use case by allowing lower level policies to disable (\"unset\") individual defaults set at a higher level (as opposed to superseding those defaults with actual, more specific, policy rules with proper meaning)
Together, these concepts combine to solve the following user stories:
- As a Platform Engineer, when configuring a Gateway, I want to set a default policy for all routes linked to my Gateway, that can be fully replaced with more specific ones(*). (Group: D. ID: gateway-default-policy)
- As a Platform Engineer, when configuring a Gateway, I want to set default policy rules (parts of a policy) for all routes linked to my Gateway, that can be individually replaced and/or expanded by more specific rules(*). (Group: DR. ID: gateway-default-policy-rule)
- As a Platform Engineer, when defining a policy that configures a Gateway, I want to set constraints (e.g. minimum/maximum value, enumerated options, etc) for more specific policy rules that are declared(*) with the purpose of replacing the defaults I set for the routes linked to my Gateway. (Group: C. ID: policy-constraints)
- As a Platform Engineer, when configuring a Gateway, I want to set a policy for all routes linked to my Gateway, that cannot be replaced nor expanded by more specific ones(*). (Group: O. ID: gateway-override-policy)
- As a Platform Engineer, when configuring a Gateway, I want to set policy rules (parts of a policy) for all routes linked to my Gateway, that cannot be individually replaced by more specific ones(*), but only expanded with additional more specific rules(*). (Group: OR. ID: gateway-override-policy-rule)
- As an Application Developer, when managing an application, I want to set a policy for my application, that fully replaces any default policy that may exist for the application at the level of the Gateway, without having to know about the existence of the default policy. (Group: D. ID: route-replace-policy)
- As an Application Developer, when managing an application, I want to expand a default set of policy rules set for my application at the level of the gateway, without having to refer to those existing rules by name. (Group: D/O. ID: route-add-policy-rule)
- As an Application Developer, when managing an application, I want to unset an individual default rule set for my application at the level of the gateway. (Group: U. ID: route-unset-policy-rule)
(*) declared in the past or in the future, by myself or any other authorized user.
The interactive nature of setting policies at different levels of the hierarchy and by different personas gives rise to the following additional user stories. These are grouped here under the Observability (Ob) aspect of D/O, but are referred to as well in relation to the \"Discoverability Problem\" described by Gateway API.
- As one who has read access to Kuadrant policies, I want to view the effective policy enforced at the traffic routed to an application, considering all active defaults and overrides at different policies(*). (Group: Ob. ID: view-effective-policy)
- As a Platform Engineer, I want to view all default policy rules that may have been replaced by more specific ones(*). (Group: Ob. ID: view-policy-rule-status)
- As a Policy Manager, I want to view all gateways and/or routes whose traffic is subject to enforcement of a particular policy rule referred by name. (Group: Ob. ID: view-policy-rule-reach)
(*) declared in the past or in the future, by myself or any other authorized user.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#writing-do-enabled-kuadrant-policies","title":"Writing D/O-enabled Kuadrant Policies","text":"Writing a Kuadrant policy enabled for Defaults & Overrides (D/O), to be attached to a network object, involves declaring the following fields at the first level of the spec:
- targetRef (required): the reference to a hierarchical network object targeted by the policy, typed as a Gateway API PolicyTargetReference or PolicyTargetReferenceWithSectionName type
- defaults: a block of default policy rules with further specification of a strategy (atomic set of rules or individual rules to be merged into lower policies), and optional conditions for applying the defaults down through the hierarchy
- overrides: a block of override policy rules with further specification of a strategy (atomic set of rules or individual rules to be merged into lower policies), and optional conditions for applying the overrides down through the hierarchy
- the bare policy rules block without further qualification as a default or override set of rules \u2013 e.g. the rules field in a Kuadrant AuthPolicy, the limits field in a RateLimitPolicy.
Between the following mutually exclusive options, either one or the other shall be used in a policy:
- the defaults and/or overrides blocks; or
- the bare set of policy rules (without further qualification as neither defaults nor overrides.)
In case the bare set of policy rules is used, it is treated implicitly as a block of defaults (see the sketch below.)
Supporting the bare set of policy rules at the first level of the spec, as an alternative to the defaults and overrides blocks, is a strategy that aims to provide:
- more natural usability, especially for those who write policies attached to the lowest level of the hierarchy supported; as well as
- backward compatibility for policies that did not support explicit D/O and later on have moved to doing so.
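As a minimal sketch of the implicit-defaults treatment described above (rule details elided with \u2026, reusing the AuthPolicy shape from the examples later in this document), the following policy declaring only the bare set of rules:
kind: AuthPolicy\nmetadata:\n name: route-policy\nspec:\n targetRef:\n kind: HTTPRoute\n rules:\n authentication:\n \"a\": {\u2026}\n
is implicitly treated as:
kind: AuthPolicy\nmetadata:\n name: route-policy\nspec:\n targetRef:\n kind: HTTPRoute\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n strategy: atomic\n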
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#inherited-policies-that-declare-an-intent","title":"Inherited Policies that declare an intent","text":"A policy that does not specify D/O fields (defaults
, overrides
) is a policy that declares an intent.
One who writes a policy without specifying defaults or overrides, but only the bare set of policy rules, may feel like they are declaring a Direct Policy. Depending on the state of other policies indirectly affecting the same object or not, the final outcome can be the same as writing a direct policy. This is especially true when the policy that declares the intent targets an object whose kind is the lowest kind accepted by Kuadrant in the hierarchy of network resources, and there are no other policies with lower precedence.
Nevertheless, because other policies can affect the final behavior of the target (e.g. by injecting defaults, by overriding rules, by adding more definitions beneath), policies that simply declare an intent, conceptually, are still Inherited Policies.
Compared to the inherited policy that misses D/O blocks, these other policies affecting the behavior may be declared:
- at higher levels in the hierarchy,
- at lower levels in the hierarchy, or even
- at the same level in the hierarchy but happening to have lower precedence (if such case is allowed by the kind of policy.)
At any time, any one of these policies can be created and therefore the final behavior of a target should never be assumed to be equivalent to the intent declared by any individual policy in particular, but always collectively determined by the combination of all intents, defaults and overrides from all inherited policies affecting the target.
From GEP-2649:
If a Policy can be used as an Inherited Policy, it MUST be treated as an Inherited Policy, regardless of whether a specific instance of the Policy is only affecting a single object.
An inherited policy that simply declares an intent (i.e. without specifying D/O) will be treated as a policy that implicitly declares an atomic set of defaults, whether the policy targets higher levels in the hierarchy or lower ones. In the absence of any other conflicting policy affecting the same target, the behavior equals the defaults which equal the intent.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#inherited-policies-that-modify-an-intent","title":"Inherited Policies that modify an intent","text":"A policy that specifies D/O fields (defaults
, overrides
) is a policy explicitly declared to modify an intent.
Without any other policy with lower precedence, there is no special meaning in choosing between defaults and overrides in an inherited policy that targets an object whose kind is the lowest kind accepted by Kuadrant in the hierarchy of network resources. The sets of rules specified in these policies affect the targeted objects indistinctly, regardless of how they are qualified.
However, because other policies may occasionally be declared with lower precedence (i.e. targeting lower levels in the hierarchy or due to ordering, see Conflict Resolution), one who declares a policy to modify an intent must carefully choose between the defaults and/or overrides blocks to organize the policy rules, regardless of whether the targeted object is of a kind that is the lowest kind in the hierarchy of network resources accepted by Kuadrant.
Even in the cases where no more than one policy of a kind is allowed to target a same object (1:1 relationship), and thus there should never exist two policies affecting a target from the same level of the hierarchy simultaneously (or, equivalently, a policy with lower precedence than another, both at the lowest level of the hierarchy), users must assume that this constraint may change (i.e. an N:1 relationship between policies of a kind and target may become allowed.)
In all cases, defaults and overrides must be used with the semantics of declaring rules that modify an intent.
- When an intent does not specify a rule for which there is a higher default declared, the default modifies the intent by setting the value specified by the default.
- Whether an intent specifies or omits a rule for which there is a higher override declared, the override modifies the intent by setting the value specified by the override.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#identifying-inherited-policy-kinds","title":"Identifying inherited policy kinds","text":"All Custom Resource Definitions (CRDs) that define a Kuadrant inherited policy must be labeled gateway.networking.k8s.io/policy: inherited
.
Users can rely on the presence of that label to identify policy kinds whose instances are treated as inhertied policies.
In some exceptional cases, there may be kinds of Kuadrant policies that do not specify defaults
and overrides
blocks, but that are still labeled as inherited policy kinds. Instances of these kinds of policies implicitly declare an atomic set of defaults, as described in Inherited Policies that declare an intent.
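For illustration, a minimal sketch of how such a label might appear on a CRD (the metadata shown here is illustrative, assuming the AuthPolicy CRD name used by Kuadrant, not a verbatim copy of the actual CRD):
apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: authpolicies.kuadrant.io\n labels:\n gateway.networking.k8s.io/policy: inherited\nspec: {\u2026}\n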
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-of-do-enabled-kuadrant-policy","title":"Examples of D/O-enabled Kuadrant policy","text":"Example 1. Atomic defaults
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n strategy: atomic\n
The above is a proper Inherited Policy that sets a default atomic set of auth rules, to be applied at lower objects in case those lower objects do not have policies of their own attached at all.
The following is a slightly different example that defines auth rules to be individually merged into lower objects, evaluated one by one: if a rule is already defined at the \"lower\" (more specific) level, the more specific rule takes precedence; otherwise, the rule is missing at the lower level and the default is activated.
Example 2. Merged defaults
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n strategy: merge\n
Similarly, a set of overrides policy rules could be specified, instead of or alongside the defaults set of policy rules.
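For instance, a sketch of an overrides block following the same schema as the examples above (rule details elided):
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n overrides:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n strategy: atomic\n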
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#atomic-vs-individually-merged-policy-rules","title":"Atomic vs. individually merged policy rules","text":"There are 2 supported strategies for applying proper Inherited Policies down to the lower levels of the herarchy:
- Atomic policy rules: the bare set of policy rules in a
defaults
or overrides
block is applied as an atomic piece; i.e., a lower object than the target of the policy, that is evaluated to be potentially affected by the policy, also has an atomic set of rules if another policy is attached to this object, therefore either the entire set of rules declared by the higher (less specific) policy is taken or the entire set of rules declared by the lower (more specific) policy is taken (depending if it's defaults
or overrides
), but the two sets are never merged into one. - Merged policy rules: each individual policy rule within a
defaults
or overrides
block is compared one to one against lower level policy rules and, when they conflict (i.e. have the same key with different values), either one or the other (more specific or less specific) is taken (depending if it's defaults
or overrides
), in a way that the final effective policy is a merge between the two policies.
Each block of defaults
and overrides
must specify a strategy
field whose value is set to either atomic
or merge
. If omitted, atomic
is assumed.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#level-of-granularity-of-compared-policy-rules","title":"Level of granularity of compared policy rules","text":"Atomic versus merge strategies, as a specification of the defaults
and overrides
blocks, imply that there are only two levels of granularity for comparing policies vis-a-vis.
-
atomic
means that the level of granularity is the entire set of policy rules within the defaults
or overrides
block. I.e., the policy is atomic, or, equivalently, the final effective policy will be either one indivisible (\"atomic\") set of rules (\"policy\") or the other.
-
For the merge
strategy, on the other hand, the granularity is of each named policy rule, where the name of the policy rule is the key and the value is an atomic object that specifies that policy rule. The final effective policy will be a merge of two policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#matrix-of-do-strategies-and-effective-policy","title":"Matrix of D/O strategies and Effective Policy","text":"When two policies are compared to compute a so-called Effective Policy out of their sets of policy rules and given default or override semantics, plus specified atomic
or merge
strategies, the following matrix applies:
- Defaults + atomic: more specific entire set of rules beats less specific entire set of rules \u2192 takes all the rules from the lower policy.
- Defaults + merge: more specific individual policy rules beat less specific individual policy rules \u2192 compare each pair of policy rules one by one and take the lower one if they conflict.
- Overrides + atomic: less specific entire set of rules beats more specific entire set of rules \u2192 takes all the rules from the higher policy.
- Overrides + merge: less specific individual policy rules beat more specific individual policy rules \u2192 compare each pair of policy rules one by one and take the higher one if they conflict.
The order of the policies, from less specific (or \"higher\") to more specific (or \"lower\"), is determined according to the Gateway API hierarchy of network resources, based on the kind of the object targeted by the policy. The policy that sits higher in the hierarchy dictates the strategy to be applied.
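As a worked illustration of the Defaults + merge cell of the matrix (a sketch only, with rule details elided), a gateway policy declaring merged defaults \"a\" and \"b\", combined with a route policy declaring only \"b\", yields an effective policy where \"a\" comes from the gateway defaults and \"b\" from the route policy:
# gateway policy (less specific)\ndefaults:\n rules:\n authentication:\n \"a\": {\u2026}\n \"b\": {\u2026}\n strategy: merge\n\n# route policy (more specific)\nrules:\n authentication:\n \"b\": {\u2026} # wins over the default \"b\"\n\n# resulting effective policy\nrules:\n authentication:\n \"a\": {\u2026} # inherited default\n \"b\": {\u2026} # from the route policy\n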
For a more detailed reference, including how to resolve conflicts in case of policies targeting objects at the same level, see GEP-713's section Hierarchy and Conflict Resolution.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#unsetting-inherited-defaults","title":"Unsetting inherited defaults","text":"In some cases, it may be desirable to be able to unset, at a lower policy, a merged default that is inherited from a higher one. In fact, some inherited defaults could be harmful to an application, at the same time as they are unfeasible to remove from scope for all applications altogether, and therefore require an exception.
Unsetting defaults via specification at lower level policies provides users who own policy rules at different levels of the hierarchy the option of not having to coordinate those exceptions \"offline\", nor having to accept the addition of special cases (conditions) at the higher level to exempt only specific lower policies from being affected by a particular default, which otherwise would constitute a violation of the inheritance pattern, as well as an imposition of additional cognitive complexity on one who reads a higher policy with too many conditions.
Instead, users should continue to be able to declare their intents through policies, and redeem an entitlement to unset unapplicable defaults, without any leakage of lower level details upwards at the higher policies.
The option of unsetting inherited defaults is presented as part of the volition implied by the inheritance of policy rules, which are typically specified for the more general case (e.g. at the level of a gateway, for all routes), though not necessarily applicable for all special cases beneath. If enabled, this feature helps disambiguate the concept of \"default\", which should not be understood strictly as the option to set values that protect the system in case of lack of specialisation, but rather by its property of volition and changeability. I.e., by definition, every default policy rule is opt-out and specifies a value that is modifiable.
In contrast, a policy rule that is neither opt-out nor modifiable better fits the definition of an override. Meanwhile, a policy rule that is not opt-out, nor sets a concrete default value to be enforced in the lack of specialisation, defines a requirement.
Finally, for the use case where users want to set defaults that cannot be unset (though still modifiable), the very feature of unsetting defaults itself should be configurable, at least at the level of the system. This can be achieved with feature switches and policy validation, including backed by the cluster's RBAC if needed.
The capability of unsetting inherited defaults from an effective policy can be identified by the presence of the spec.unset
field in a policy. The value is a list of default named policy rules to be unset.
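A minimal sketch of the proposed field, assuming a merged default rule named \"a\" is inherited from a gateway policy that the route-level policy wants to opt out of (rule details elided):
kind: AuthPolicy\nmetadata:\n name: route-policy\nspec:\n targetRef:\n kind: HTTPRoute\n unset:\n - \"a\" # removes the inherited default rule \"a\" from the effective policy\n rules:\n authorization:\n \"b\": {\u2026}\n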
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#conditionally-applying-do","title":"Conditionally applying D/O","text":"Users should be able to specify conditions for applying their blocks of defaults
and overrides
. These conditions aim to support exceptional cases where the blocks cannot be simply applied downwards, but rather depend on specifics found in the lower policies, while still defined in generic terms \u2013 as opposed to conditions that leak details of individual lower policies upwards.
Between a higher and a lower set of policy rules, the higher level dictates the conditions for its rules to be applied (either as defaults or as overrides) over the lower level, and never the other way around.
D/O conditions are identified by the presence of the spec.defaults.when
or spec.overrides.when
fields in a policy. Those should be defined using Common Expression Language (CEL), evaluated in the control plane against the lower level specification that the higher level is being applied to. I.e. self
in the CEL expression is the lower policy.
A concrete useful application for conditionally enforcing a block of D/O is for specifying constraints for lower values. E.g. if a lower policy tries to set a value on a numeric field that is greater (or lower) than a given threshold, apply an override that sets that field value to equal to the threshold; otherwise, use the value declared by the lower policy.
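A hedged sketch of such a constraint, using an illustrative RateLimitPolicy-like shape (the field names under limits and the CEL expression are illustrative, not a verbatim copy of the actual API), where the override only kicks in when the lower policy exceeds a threshold:
kind: RateLimitPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n overrides:\n limits:\n \"requests\": {\u2026} # caps the value at the threshold\n strategy: merge\n when: self.limits[\"requests\"].rate > 100 # illustrative CEL over the lower policy\n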
In contrast, an example of trivially redundant application of D/O conditions would be specifying a default block of rules that is only applied when the lower level does not declare a more specific replacement. Since this is natural semantics of a default, one does not have to use conditions for that.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-of-do-cases","title":"Examples of D/O cases","text":"The following sets of examples generalize D/O applications for the presented user stories, regardless of details about specific personas and kinds of targeted resources. They illustrate the expected behavior for different cases involving defaults, overrides, constraints and unsetting.
Examples and highlighted user stories:
- A. Default policy entirely replaced by another at lower level (gateway-default-policy, route-replace-policy)
- B. Default policy rules merged into policies at lower level (gateway-default-policy-rule, route-add-policy-rule)
- C. Override policy entirely replacing other at lower level (gateway-override-policy)
- D. Override policy rules merged into other at lower level (gateway-override-policy-rule)
- E. Override policy rules setting constraints to other at lower level (policy-constraints)
- F. Policy rule that unsets a default from higher level (route-unset-policy-rule)
In all the examples, a Gateway and an HTTPRoute object are targeted by two policies, and an effective policy is presented highlighting the expected outcome. This poses no harm to generalizations involving same or different kinds of targeted resources, multiple policies targeting a same object, etc.
The leftmost YAML is always the \"higher\" (less specific) policy; the one in the middle, separated from the leftmost one by a \"+\" sign, is the \"lower\" (more specific) policy; and the rightmost YAML is the expected Effective Policy.
For a complete reference of the order of hierarchy, from least specific to most specific kinds of resources, as well as how to resolve conflicts of hierarchy in case of policies targeting objects at the same level, see Gateway API's Hierarchy definition for Policy Attachment and Conflict Resolution.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-a-default-policy-entirely-replaced-by-another-at-lower-level","title":"Examples A - Default policy entirely replaced by another at lower level","text":"Example A1. A default policy that is replaced entirely if another one is set at a lower level
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-b-default-policy-rules-merged-into-policies-at-lower-level","title":"Examples B - Default policy rules merged into policies at lower level","text":"Example B1. A default policy whose rules are merged into other policies at a lower level, where individual default policy rules can be overridden or unset - without conflict
Example B2. A default policy whose rules are merged into other policies at a lower level, where individual default policy rules can be overridden or unset - with conflict
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-c-override-policy-entirely-replacing-other-at-lower-level","title":"Examples C - Override policy entirely replacing other at lower level","text":"Example C1. An override policy that replaces any other that is set at a lower level entirely
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-d-override-policy-rules-merged-into-other-at-lower-level","title":"Examples D - Override policy rules merged into other at lower level","text":"Example D1. An override policy whose rules are merged into other policies at a lower level, overriding individual policy rules with same identification - without conflict
Example D2. An override policy whose rules are merged into other policies at a lower level, overriding individual policy rules with same identification - with conflict
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-e-override-policy-rules-setting-constraints-to-other-at-lower-level","title":"Examples E - Override policy rules setting constraints to other at lower level","text":"The examples in this section introduce the proposal for a new when
field for the defaults
and overrides
blocks. This field dictates the conditions to be found in a lower policy that would make a higher policy or policy rule to apply, according to the corresponding defaults
or overrides
semantics and atomic
or merge
strategy.
Combined with a simple case of override policy (see Examples C), the when
condition field allows modeling for use cases of setting constraints for lower-level policies.
As here proposed, the value of the when
condition field must be a valid Common Expression Language (CEL) expression.
Example E1. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - lower policy is compliant with the constraint
Example E2. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - lower level violates the constraint
Example E3. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - merge granularity problem
The following example illustrates the possibly unintended consequences of enforcing D/O at strict levels of granularity, and the flip side of the strategy
field offering a closed set of options (atomic
, merge
).
On one hand, the API is simple and straightforward, and there are no deeper side effects to be concerned about, other than at the two levels provided (atomic sets or merged individual policy rules.) On the other hand, this design may require more offline interaction between the actors who manage conflicting policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-f-policy-rule-that-unsets-a-default-from-higher-level","title":"Examples F - Policy rule that unsets a default from higher level","text":"The examples in this section introduce a new field unset: []string
at the same level as the bare set of policy rules. The value of this field, provided as a list, dictates the default policy rules declared at a higher level to be removed (\"unset\") from the effective policy, specified by name of the policy rules.
Example F1. A policy that unsets a default policy rule set at a higher level
Example F2. A policy that tries to unset an override policy rule set a higher level
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#status-reporting-and-policy-discoverability","title":"Status reporting and Policy discoverability","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#possible-statuses-of-an-inherited-policy","title":"Possible statuses of an inherited policy","text":"An inherited policy can be at any of the following conditions (RFC 0004):
Possible status conditions (Type / Status / Reason: Message):
- Accepted / True / \"Accepted\": \"Policy has been accepted\"
- Accepted / False / \"Conflicted\": \"Policy is conflicted by <policy-ns/policy-name>\"
- Accepted / False / \"Invalid\": \"Policy is invalid\"
- Accepted / False / \"TargetNotFound\": \"Policy target <resource-name> was not found\"
- Enforced / True / \"Enforced\": \"Policy has been successfully enforced[. The following defaults have been added by <policy-ns/policy-name>: x, y]\"
- Enforced / True / \"PartiallyEnforced\": \"Policy has been successfully enforced. The following rules have been overridden by <policy-ns/policy-name>: a, b[; the following defaults have been added by <policy-ns/policy-name>: x, y]\"
- Enforced / False / \"Overridden\": \"Policy has been overridden by <policy-ns/policy-name>\"
- Enforced / False / \"Unknown\": \"Policy has encountered some issues\""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-discoverability-and-effective-policy","title":"Policy discoverability and Effective policy","text":"A special condition must be added to every object that is targeted by a Kuadrant inherited policy if the policy's Enforced
status condition is True
.
This special condition to be added to the target object is kuadrant.io/xPolicyAffected
, where \"xPolicy\" is the kind of the inherited policy (e.g. AuthPolicy, RateLimitPolicy.)
The possible statuses of an object regarding its sensitivity to one or more inherited policies are:
Possible statuses (Type / Status / Reason: Message):
- xPolicyAffected / False / \"Unaffected\": \"The object is not affected by any xPolicy\"
- xPolicyAffected / True / \"Affected\": \"The object is affected by xPolicy <policy-ns/policy-name>\"
- xPolicyAffected / True / \"PartiallyAffected\": \"The following sections of the object are affected by xPolicy <policy-ns/policy-name>: rules.0, rules.2\"
The presence of the PolicyAffected
status condition helps identify that an object is sensitive to one or more policies of a kind, and gives some specifics about the scope of that effect (entire object or selected sections.) In many cases, this should be enough for inferring the actual policy rules being enforced for that object.
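For illustration, a sketch of what such a condition might look like on a target object's status (the message and policy name are illustrative):
status:\n conditions:\n - type: kuadrant.io/AuthPolicyAffected\n status: \"True\"\n reason: \"Affected\"\n message: \"The object is affected by AuthPolicy default/gw-policy\"\n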
For other cases where any of the following situations hold, a more detailed view of the final Effective Policy must be provided to the user:
- If the rules of the policy cannot be inferred by the name of the policy and/or the user lacks permission to read the policy object;
- If the object is affected by more than one policy.
To help visualize the effective policy for a given target object in that situation, at least one of the following options must be provided to the user:
- A read-only
EffectivePolicy
custom resource, defined for each kind of inherited policy, and with an instance created for each affected object, that is reconciled and updated by the policy controller. - A HTTP endpoint of the policy controller that users can consume to read the effective policy.
- A CLI tool that offers a command that queries the cluster and returns the effective policy \u2013 either by leveraging any of the methods above or computing the effective policy \"on-the-fly\" in the same fashion as the policy controller does.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#applying-policies","title":"Applying policies","text":"The following diagrams are a high level model to guide the process of applying a set of policies of a kind for a given Gateway object, where the Gateway object is considered the root of a hierarchy, and for all objects beneath, being the xRoute objects the leaves of the hierarchical tree.
As presented, policies can target either Gateways or route objects (HTTPRoutes, GRPCRoutes), with no restriction regarding the number of policies of a kind that target a same particular object, i.e. an N:1 relationship is allowed. Without any loss of generality, a 1:1 relationship between policies of a kind and targeted objects can be imposed if preferred, as a measure to initially reduce the amount of information presented to the user and the corresponding cognitive load.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#apply-policies-to-a-gateway-root-object-and-all-objects-beneath","title":"Apply policies to a Gateway (root object) and all objects beneath","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n start([For a Gateway <i>g</i><br>and policy kind <i>pk</i>]) -->\n list-routes[List all routes<br>accepted by <i>g</i> as <i>R</i>] -->\n apply-policies-for-r\n subgraph for-each-route[For each <i>r in R</i>]\n apply-policies-for-r[[Apply policies<br>of kind <i>pk</i><br>scoped for <i>r</i>]] -->\n apply-policies-for-r\n end\n for-each-route -->\n build-virtual-route[Build a virtual route <i>vr</i><br>with all route rules not<br>target by any policy] -->\n apply-policies-for-vr[[Apply policies<br>of kind <i>pk</i><br>scoped for <i>vr</i>]] -->\n finish(((END)))
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#apply-policies-of-a-kind-for-an-object","title":"Apply policies of a kind for an object","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n apply-policies-for-o-start([Apply policies of kind <i>pk</i><br>scoped for an object <i>o</i>]) -->\n list-policies[Make <i>P</i> \u2190 all policies <br>of kind <i>pk</i> that<br>affect <i>o</i>] -->\n sort-policies[Sort <i>P</i> from<br>lowest to highest] -->\n build-effective-policy[Build an effective<br>policy <i>ep</i> without<br>any policy rules] -->\n merge-p-into-ep\n subgraph for-each-policy[For each policy <i>p in P</i>]\n merge-p-into-ep[[Merge <i>p into <i>ep</i>]] -->\n merge-p-into-ep\n end\n for-each-policy -->\n reconcile-ep[Reconcile resources<br>for <i>ep</i>] -->\n apply-policies-for-o-finish(((END)))
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-two-policies-together","title":"Merging two policies together","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-p1-into-p2-start([Merge policy <i>p1</i><br>into policy <i>p2</i>]) -->\n p1-format{Explicit<br><i>defaults</i> or <i>overrides</i><br>declared in <i>p1</i>?}\n p1-format -- Yes --> merge-defaults-for-r[[\"Merge <b>defaults</b> block<br>of policy rules<br>of <i>p1</i> into <i>p2</i>\"]] --> merge-overrides-for-r[[\"Merge <b>overrides</b> block<br>of policy rules<br>of <i>p1</i> into <i>p2</i>\"]] --> merge-p1-into-p2-finish(((Return <i>p2</i>)))\n p1-format -- No --> merge-bare-rules-for-r[[\"Merge ungrouped<br>block of policy rules<br>of <i>p1</i> into <i>p2</i><br>(as <b>defaults</b>)\"]] --> merge-p1-into-p2-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-a-generic-block-of-policy-rules-defaults-or-overrides-into-a-policy-with-conditions","title":"Merging a generic block of policy rules (defaults or overrides) into a policy with conditions","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-block-of-rules-into-p-start([Merge block of<br>policy rules <i>B</i><br>into policy <i>p</i>]) -->\n r-conditions-match{\"<i>B.when(p)</i>\"}\n r-conditions-match -- \"Conditions do not match\" --> merge-block-of-rules-into-p-finish(((Return <i>p</i>)))\n r-conditions-match -- \"Conditions match\" --> block-semantics{Merge <i>B</i> as}\n block-semantics -- \"Defaults\" --> merge-default-block-into-p[[Merge default block<br>of policy rules <i>B</i><br>into policy <i>p</i>]] --> merge-block-of-rules-into-p-finish\n block-semantics -- \"Overrides\" --> merge-override-block-into-p[[Merge override block<br>of policy rules <i>B</i><br>into policy <i>p</i>]] --> merge-block-of-rules-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merge-a-defaults-block-of-policy-rules-into-a-policy","title":"Merge a defaults
block of policy rules into a policy","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-default-block-into-p-start([Merge default block<br>of policy rules <i>B</i><br>into policy <i>p</i>]) -->\n unset-unwanted-policy-rules[Remove from <i>B</i><br>all policy rules<br>listed in <i>p.unset</i>] -->\n p-empty{<i>p.empty?</i>}\n p-empty -- \"Yes\" --> full-replace-p-with-defaut-block[<i>p.rules \u2190 B</i>] --> merge-default-block-into-p-finish(((Return <i>p</i>)))\n p-empty -- \"No\" --> default-block-strategy{<i>B.strategy</i>}\n default-block-strategy -- \"Atomic\" --> merge-default-block-into-p-finish\n default-block-strategy -- \"Merge\" --> default-p-r-exists\n subgraph for-each-default-policy-rule[\"For each <i>r in B<i>\"]\n default-p-r-exists{\"<i>p[r.id].exists?</i>\"}\n default-p-r-exists -- \"Yes\" --> default-p-r-exists\n default-p-r-exists -- \"No\" --> default-replace-pr[\"<i>p[r.id] \u2190 r</i>\"] --> default-p-r-exists\n end\n for-each-default-policy-rule -->\n merge-default-block-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merge-an-overrides-block-of-policy-rules-into-a-policy","title":"Merge an overrides
block of policy rules into a policy","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-override-block-into-p-start([Merge override block<br>of policy rules <i>B</i><br>into policy <i>p</i>]) -->\n override-block-strategy{<i>B.strategy</i>}\n override-block-strategy -- \"Atomic\" --> full-replace-p-with-override-block[<i>p.rules \u2190 B</i>] --> merge-override-block-into-p-finish(((Return <i>p</i>)))\n override-block-strategy -- \"Merge\" --> override-replace-pr\n subgraph for-each-override-policy-rule[\"For each <i>r in B<i>\"]\n override-replace-pr[\"<i>p[r.id] \u2190 r</i>\"] --> override-replace-pr\n end\n for-each-override-policy-rule -->\n merge-override-block-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#implementation-tiers","title":"Implementation tiers","text":"This section proposes a possible path for the implementation of this RFC for Kuadrant's existing kinds of policies that are affected by D/O \u2013 notably AuthPolicy and RateLimitPolicy.
The path is divided in 3 tiers that could be delivered in steps, additionally to a series of enhancements & refactoring.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-1","title":"Tier 1","text":" - Atomic defaults (currently supported; missing addition of the
defaults
field to the APIs) - Atomic overrides
- Policy status and Policy discoverability (i.e. PolicyAffected status on target objects)
- CRD labels
gateway.networking.k8s.io/policy: inherited | direct
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-2","title":"Tier 2","text":" - D/O
when
conditions (and support for \"constraints\") - Merge strategy
- Reporting of effective policy
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-3","title":"Tier 3","text":" - Unsetting (
unset
) - Metrics for D/O policies (control plane)
- Docs: possible approaches for \"requirements\"
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#enhancements-and-refactoring","title":"Enhancements and refactoring","text":" - Extract generic part of D/O implementation to Kuadrant/gateway-api-machinery.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#drawbacks","title":"Drawbacks","text":"See Mutually exclusive API designs > Design option: strategy
field.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#mutually-exclusive-api-designs","title":"Mutually exclusive API designs","text":"The following alternatives were considered for the design of the API spec to support D/O:
strategy
field - RECOMMENDED granularity
field when
conditions (at any level of the spec) - CEL functions (at any level of the spec)
- \u201cpath-keys\u201d
- JSON patch-like
All the examples in the RFC are based on API design strategy
field.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-strategy-field","title":"Design option: strategy
field","text":"Each block of defaults
and overrides
specify a field strategy: atomic | merge
, with atomic
assumed if the field is omitted.
All the examples in the RFC are based on this design for the API spec.
Some of the implications of the design are explained in the section Atomic vs. individually merged policy rules, with highlights to the support for specifying the level of atomicity of the rules in the policy based on only 2 granularities \u2013 the entire set of policy rules (atomic) or each named policy rule (merge.)
\u2705 Pros:
- Same schema as a normal policy without D/O
- Declarative
- Safe against \"unmergeable objects\" (e.g. two rules declaring different one-of options)
- Strong types
- Extensible (by adding more fields, e.g. to support unsetting defaults)
- Easy to learn
\u274c Cons:
- 2 levels of granularity only \u2013 either all (\u2018atomic\u2019) or policy rule (\u2018merge\u2019)
- 1 granularity declaration per D/O block \u2192 declaring both \u2018atomic\u2019 and \u2018merge\u2019 simultaneously requires 2 separate policies targeting the same object
The design option based on the strategy field is the RECOMMENDED design for the implementation of Kuadrant Policies enabled for D/O. This is due to the pros above, plus the fact that this design can evolve to other, more versatile forms, such as the granularity field, when conditions or CEL functions, in the future, while the opposite would be harder to achieve.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-granularity-field","title":"Design option: granularity
field","text":"Each block of defaults
and overrides
would specify a granularity
field, set to a numeric integer value that describes which level of the policy spec, from the root of the set of policy rules until that number of levels down, to treat as the key, and the rest as the atomic value.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n granularity: 0 # the entire spec (\"rules\") is an atomic value\n overrides:\n rules:\n metadata:\n \"c\": {\u2026}\n response:\n \"d\": {\u2026}\n granularity: 2 # each policy rule (\"c\", \"d\") is an atomic value\n
\u2705 Pros:
- Same as design option strategy field
- Unlimited levels of granularity (values can be pointed as atomic at any level)
\u274c Cons:
- 1 granularity declaration per D/O block \u2192 N levels simultaneously require N policies
- Granularity specified as a number - user needs to count the levels
- Setting a deep level of granularity can cause merging \"unmergeable objects\"
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-when-conditions-at-any-level-of-the-spec","title":"Design option: when
conditions (at any level of the spec)","text":"Inspired by the extension of the API for D/O with an additional when
field (see Examples E), this design alternative would use the presence of this field to signal the granularity of the atomic operation of default or override.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n when: CEL # level 1 - entire \"authentication\" block\n authorization:\n \"b\":\n \"prop-1\": {\u2026}\n when: CEL # level 2 - \"b\" authorization policy rule\n
\u2705 Pros:
- Same as granularity field
- As many granularity declarations per D/O block as complex objects in the policy
- Granularity specified \u201cin-place\u201d
\u274c Cons:
- Setting a deep level of granularity can cause merging \"unmergeable objects\"
- Implementation nightmare - hard to define the API from existing types
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-cel-functions-at-any-level-of-the-spec","title":"Design option: CEL functions (at any level of the spec)","text":"This design option leans on the power of Common Expression Language (CEL), extrapolating the design alternative with when
conditions beyond declaring a CEL expression just to determine if a statically declared value should apply. Rather, it proposes the use of CEL functions that output the value to default to or to override with, taking the conflicting \"lower\" value as input, with or without a condition as part of the CEL expression. The value of a key set to a CEL function indicates the level of granularity of the D/O operation.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026} # static value\n \"b\": \"cel:self.value > 3 ? AuthenticationRule{value: 3} : self\"\n authorization: |\n cel:Authorization{\n c: AuthorizationRule{prop1: \"x\"}\n }\n
\u2705 Pros:
- Unlimited levels of granularity
- Granularity specified \u201cin-place\u201d
- Extremely powerful
- Elegant and simple implementation-wise
\u274c Cons:
- Weakly typed
- Implementation completely new \u2013 cannot reuse current API types
- Requires all types to be defined as protobufs
- Without strong guardrails, users can easily shoot themselves in the foot
- Validation likely requires complex functions for parsing the CEL expressions
- Non-declarative
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-path-keys","title":"Design option: \u201cpath-keys\u201d","text":"A more radical alternative considered consisted of defining defaults
and overrides
blocks whose schemas would not match the ones of a normal policy without D/O. Instead, these blocks would consist of simple key-value pairs, where the keys specify the paths in an affected policy where to apply the value atomically.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n \"rules.authentication\":\n \"a\": {G}\n \"rules.authorization.b\": {G}\n
\u2705 Pros:
- D/O as simple key-value sets (keys: where to apply, values: what to apply)
- Declarative
- Unlimited levels of granularity (values can be pointed as atomic at any level)
- Unlimited merge declarations per D/O block
- Intuitive, easy-to-learn
\u274c Cons:
- Not same schema as the normal policy (without D/O) - not very GWAPI-like
- Weakly typed (i.e. map[string]any)
- Not extensible (e.g., cannot add other fields to the API)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-json-patch-like","title":"Design option: JSON patch-like","text":"Similar to the path-keys design option, inspired by JSON patch operations, to provide more kinds of operations and extensibility.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n\n - path: rules.authentication\n operation: add\n value: { \"a\": {G} }\n - path: rules.authorization.b\n operation: remove\n - path: |\n rules.authentication.a.\n value\n operation: le\n value: 50\n
\u2705 Pros \u274c Cons - Same as \"path-keys\" field
- Extensible, all kinds of operations supported (add, remove, constraint)
- Not same schema as the normal policy (without D/O) - not very GWAPI-like
- Less declarative
- Weakly typed (i.e.
value: any)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#prior-art","title":"Prior art","text":"Other than the primitive support only for implicit atomic defaults provided by Kuadrant for the AuthPolicy and RateLimitPolicy, other real-life implementations of D/O along the lines proposed by Gateway API are currently unknown.
Some orientative examples provided in:
- GEP-2649 - search for \"CDNCachingPolicy\" as well as \"Merging into existing spec fields\";
gwctl
effective policy calculation for inherited policies - see policy manager's merge test cases.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#out-of-scope","title":"Out of scope","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-requirements","title":"Policy requirements","text":"A use case often described in association with D/O is the one for declaring policy requirements. These are high level policies that declare requirements to be fulfilled by more specific (lower level) policies without specifying concrete default or override values nor constraints. E.g.: \"an authentication policy must be enforced, but none is provided by default.\"
A typical generic policy requirement user story is:
As a Platform Engineer, when configuring a Gateway, I want to set policy requirements to be fulfilled by whoever manages an application/route linked to my Gateway, so that all interested parties, including myself, can be aware of applications deployed to the cluster that lack a particular policy protection.
Policy requirements as here described are out of scope of this RFC.
We believe policy requirement use cases can be stated and solved as an observability problem, by defining metrics and alerts that cover missing policies or policy rules, without necessarily having to write a policy of the same kind to express such a requirement to be fulfilled.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#unresolved-questions","title":"Unresolved questions","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-policies-with-references-to-external-objects","title":"Merging policies with references to external objects","text":"How to handle merges of policies from different namespaces that contain references to other objects (e.g. Secrets)?
Often policy rules include references to other Kubernetes objects, such as Secrets, typically defined in the same namespace as the policy object. When merging policies from different namespaces, these references need to be taken into account.
If not carried along with the derivative resources (e.g. Authorino AuthConfig objects) that are created from a merge of policies (or from the computed effective policy), composed out of definitions from different namespaces and dependent on those references, these references to external objects can be broken.
This is not much of a problem for atomic D/O only, as the derivative objects that depend on the references could be forced to be created in the same namespace as the policy that wins against all the others \u2013 and therefore in the same namespace as the winning referents as well. However, when merging policies, we can run into situations where final effective policies (and thus also other derivative resources) contain references to objects inherited from definitions in other namespaces.
Possible solutions to this problem include:
- Copying the referenced objects into the namespace where the derivative resources will be created.
- Involves maintaining (watching and reconciling) those referenced objects
- May raise security concerns
- Allowing derivative resources (e.g. Authorino AuthConfigs) to reference objects across namespaces, as well as giving permissions to the components that process those references (e.g. Authorino) to read across namespaces
- May raise security concerns
- Should probably be restricted to derivative resources created by Kuadrant and not allowed to users who create the derivative resources themselves
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-spec-resembling-more-the-target-spec","title":"Policy spec resembling more the target spec","text":"Should Kuadrant's inherited policy specs resemble more the specs of the objects they target?
The UX for one who writes a Kuadrant policy of the inherited class of policies is arguably not very different from writing any custom resource that happens to specify a targetRef
field. Other than the name and kind of the target object, there is little in a Kuadrant policy custom resource that gives the user an experience close to \"adding fields\" in the target object.
With the exception of a few types reused for the route selectors, the spec of a Kuadrant policy is very different from the spec of the object that the policy ultimately augments, i.e. the spec of the route object. This remains basically unchanged after this RFC. However, another way to think about the design of those APIs is one where, in contrast, the specs of the policies partially mirror the spec of the route, so users can write policies in a more intuitive fashion, as if the definitions of the policy were extensions of the routes they target (directly or by targeting gateways the routes are attached to.)
E.g.:
kind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n rules:\n\n - name: rule-1\n matches:\n - method: GET\n backendRef: {\u2026}\n - name: rule-2\n backendRef: {\u2026}\n
An inherited policy that targets the HTTPRoute above could otherwise look like the following:
kind: Policy\nmetadata:\n name: my-policy\nspec:\n targetRef:\n kind: HTTPRoute\n name: my-route\n defaults: # mirrors the spec of the httproute object\n policySpecificDef: {\u2026} # augments the entire httproute object\n overrides: # mirrors the spec of the httproute object\n rules:\n\n - name: rule-2\n policySpecificDef: {\u2026} # augments only httprouterule rule-2 of the httproute object\n
The above is already somewhat closer to being true for the AuthPolicy API than for the RateLimitPolicy one. However, that is strictly coincidental, because the AuthPolicy's spec happens to specify a rules
field, where the equivalent at the same level in RateLimitPolicy is called limits
.
This alternative design could make writing policies more like defining filters in an HTTPRoute, with the difference that policies are external to the target they extend (while filters are internal.) At the same time, it could be a replacement for Kuadrant route selectors, where the context of applicability of a policy rule is given by the very structure within the spec in which the policy rule is declared (resembling that of the target), thus also shaping the context for D/O.
One caveat of this design, though, is that each policy-specific definition (i.e. the rule specification that extends the object at a given point defined by the very structure of the spec) is exclusive to that given point in the structure of the object. I.e., one cannot specify a single policy rule that augments N > 1 specific rules of a target HTTPRoute.
Due to its relevance to the design of the API that enables D/O, this was left as an unresolved question. Note, nonetheless, that as a pattern this alternative API design extends beyond inherited policies, also impacting the direct policy kinds DNSPolicy and TLSPolicy.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#future-possibilities","title":"Future possibilities","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#n1-policy-target-relationship","title":"N:1 policy-target relationship","text":"Although this proposal was thought to keep options open for multiple policies of a kind targeting a same network resource, this is currently not the state of things for Kuadrant. Instead, Kuadrant enforces 1:1 relationship between policies of a kind and target resources.
Supporting N:1 relationships could enable use cases such as App Developers defining D/O for each other at the same level of a shared xRoute, as well as Platform Engineers setting different policy rules on the same Gateway.
This could provide an alternative for achieving separation of concerns for complex policy kinds such as the AuthPolicy, where different users could be responsible for authentication and authorization, without necessarily depending on defining new kinds of policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#route-rule-name-and-targetrefsectionname","title":"Route rule name
and targetRef.sectionName
","text":"If Gateway API's GEP-995 is accepted (i.e. kubernetes-sigs/gateway-api#2593 gets merged) and the name
field for route rules is implemented in the APIs (HTTPRoute and GRPCRoute), this could impact how Kuadrant delivers D/O. Although the semantics could remain the same, the way users specify the scope for a given set of policy rules could be significantly simplified.
As of today, Kuadrant's AuthPolicy and RateLimitPolicy APIs allow users to target sections of an HTTPRoute based on route selectors, and thus all the conflict resolution involved in handling D/O must take that logic into account.
With named route rules supported by Gateway API, either route selectors could be redefined in a simpler form, where each selector consists of a list of rule names, and/or entire policies could be scoped to a section of a resource by defining the targetRef
field based on the PolicyTargetReferenceWithSectionName
type.
To be noted: GEP-2649 recommends against defining inherited policies that allow for sectionName
in the targetRef
. Nonetheless, this is a general rule of the spec that is said to be acceptable to break in the spirit of offering better functionality to users, provided the implementation can deal with the associated discoverability and complexity problems of this feature.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#use-listmaptype-instead-of-maps-of-policy-rules","title":"Use listMapType instead of maps of policy rules","text":"Despite having recently modified the AuthPolicy and RateLimitPolicy APIs to use maps for declaring policy rules instead of lists (RFC 0001), reverting this design in future versions of these APIs, plus treating those lists as listMapType
, could let us leverage the API server's strategic merge type to handle merges between policy objects.
In the Policy CRDs, the policy rule types must specify a name
field (required). The list of rules type (i.e. []Rule
) must then specify the following Kubebuilder CRD processing annotations:
// +listType=map\n// +listMapKey=name\n
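For illustration only (a sketch of the merge semantics this would unlock, not Kuadrant's current behavior), two such lists keyed by name could then be combined entry-by-entry by the API server instead of being atomically replaced:
# hypothetical defaults block of a parent policy:\nrules:\n- name: a\n  value: 1\n# policy rules declared at a lower level:\nrules:\n- name: b\n  value: 2\n# merged effective policy (list entries keyed by 'name'):\nrules:\n- name: a\n  value: 1\n- name: b\n  value: 2\n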
-
At the time of writing, GEP-713 (Kubernetes Gateway API, SIG-NETWORK) is under revision, expected to be split into two separate GEPs, one for Direct Policies (GEP-2648) and one for Inherited Policies (GEP-2649.) Once these new GEPs supersede GEP-713, all references to the previous GEP in this document must be updated to GEP-2649.\u00a0\u21a9
"},{"location":"api-quickstart/","title":"API Quickstart","text":""},{"location":"api-quickstart/#introduction","title":"Introduction","text":"This document details how to setup a local reference architecture, and design and deploy an API. This will show the following API management features in a kube native environment using Kuadrant and other open source tools:
- API design
- API security and access control
- API monitoring
- Traffic management and scalability
The sections in this document are grouped by the persona that is typically associated with the steps in that section. The 3 personas are:
- The platform engineer, who provides and maintains a platform for application developers,
- the application developer, who designs, builds and maintains applications and APIs,
- and the API consumer, who makes calls to the API
"},{"location":"api-quickstart/#pre-requisities","title":"Pre-requisities","text":" docker
: https://www.docker.com/products/docker-desktop/ kind
: https://kind.sigs.k8s.io/ kubectl
: https://kubernetes.io/docs/reference/kubectl/ kustomize
: https://kustomize.io/ helm
: https://helm.sh/docs/intro/install/ operator-sdk
: https://sdk.operatorframework.io/docs/installation/ - An AWS account with a Secret Access Key and Access Key ID. You will also need a Route 53 zone.
"},{"location":"api-quickstart/#platform-engineer-platform-setup","title":"(Platform engineer) Platform Setup","text":"Export the following env vars:
export KUADRANT_AWS_ACCESS_KEY_ID=<key_id>\nexport KUADRANT_AWS_SECRET_ACCESS_KEY=<secret>\nexport KUADRANT_AWS_REGION=<region>\nexport KUADRANT_AWS_DNS_PUBLIC_ZONE_ID=<zone>\nexport KUADRANT_ZONE_ROOT_DOMAIN=<domain>\n
Clone the api-quickstart repo and run the quickstart script:
git clone git@github.com:Kuadrant/api-quickstart.git && cd api-quickstart\n./quickstart.sh\n
This will take several minutes as 3 local kind clusters are started and configured in a hub and spoke architecture. The following components will be installed on the clusters:
- Hub
- Open Cluster Management, as a 'hub' cluster
- Kuadrant Multi Cluster Gateway Controller, for managing a Gateway in multiple clusters centrally
- Gatekeeper, for constraints on Gateway Policy requirements
- Thanos, for receiving metrics centrally
- Grafana, for visualising API & Gateway metrics
- Spoke x2
- Open Cluster Management, as a 'spoke' cluster
- Kuadrant Operator, for auth and rate limiting policies attached to a HTTPRoute
- Istio, with the Gateway API CRDs, as the Gateway for ingress traffic
- MetalLB, for exposing the Gateway service on the local network
- Prometheus, for scraping and federating metrics to the hub
"},{"location":"api-quickstart/#verify-the-gateway-and-configuration","title":"Verify the Gateway and configuration","text":"View the ManagedZone, Gateway and TLSPolicy. The ManagedZone and TLSPolicy should have a Ready status of true. The Gateway should have a Programmed status of True.
kubectl --context kind-api-control-plane get managedzone,tlspolicy,gateway -n multi-cluster-gateways\n
"},{"location":"api-quickstart/#guard-rails-constraint-warnings-about-missing-policies-dns-tls","title":"Guard Rails: Constraint warnings about missing policies ( DNS, TLS)","text":"Running the quick start script above will bring up Gatekeeper and the following constraints:
- Gateways must have a TLSPolicy targeting them
- Gateways must have a DNSPolicy targeting them
To view the above constraints in kubernetes, run this command:
kubectl --context kind-api-control-plane get constraints\n
Info
Since a gateway has been created automatically, along with a TLSPolicy
, the violation for a missing DNSPolicy
will be active until one is created.
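To inspect the details of the active violation, describing the constraints also works (a quick check; describe accepts the same constraints category used by the get command above):
kubectl --context kind-api-control-plane describe constraints\n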
"},{"location":"api-quickstart/#grafana-dashboard-view","title":"Grafana dashboard view","text":"To get a top level view of the constraints in violation, the Stitch: Platform Engineer Dashboard
can be used. This can be accessed at https://grafana.172.31.0.2.nip.io
Grafana has a default username and password of admin
. You can find the Stitch: Platform Engineer Dashboard
dashboard in the Default
folder.
"},{"location":"api-quickstart/#create-the-missing-dnspolicy","title":"Create the missing DNSPolicy","text":"Create a DNSPolicy that targets the Gateway with the following command:
kubectl --context kind-api-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n geo:\n defaultGeo: EU\nEOF\n
"},{"location":"api-quickstart/#platform-overview","title":"Platform Overview","text":"Since we have created all the policies that Gatekeeper had the guardrails around, you should no longer see any constraints in violation. This can be seen back in the Stitch: Platform Engineer Dashboard
in Grafana at https://grafana.172.31.0.2.nip.io
"},{"location":"api-quickstart/#application-developer-app-setup","title":"(Application developer) App setup","text":""},{"location":"api-quickstart/#api-design","title":"API Design","text":"Fork and/or clone the Petstore App at https://github.com/Kuadrant/api-petstore
git clone git@github.com:kuadrant/api-petstore && cd api-petstore\n# Or if you forked the repository:\n# git clone git@github.com:<your_github_username>/api-petstore && cd api-petstore\n
Then deploy it to the first workload cluster:
kustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-1 apply -f-\n
This will deploy:
- A
petstore
Namespace - A
Secret
, containing a static API key that we'll use later for auth - A
Service
and Deployment
for our petstore app - A Gateway API
HTTPRoute
for our petstore app
"},{"location":"api-quickstart/#route-53-dns-zone","title":"Route 53 DNS Zone","text":"When the DNS Policy has been created, and the previously created HTTPRoute
has been attached, a DNSRecord custom resource will also be created in the cluster, resulting in records being created in your AWS Route 53 zone. Navigate to Route 53 and you should see some new records in the zone.
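You can also confirm this from the hub cluster (a quick check; it assumes the DNSRecord CRD registered by the multi-cluster gateway controller, listing across all namespaces):
kubectl --context kind-api-control-plane get dnsrecords -A\n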
"},{"location":"api-quickstart/#configuring-the-region-label","title":"Configuring the region label","text":"Configure the app REGION
to be eu
:
kubectl --context kind-api-workload-1 apply -k ./resources/eu-cluster/\n
"},{"location":"api-quickstart/#exploring-the-open-api-specification","title":"Exploring the Open API Specification","text":"The raw Open API spec can be found in the root of the repo:
cat openapi.yaml\n# ---\n# openapi: 3.0.2\n# info:\n# title: Stitch API Petstore\n# version: 1.0.18\n
"},{"location":"api-quickstart/#application-developer-api-security","title":"(Application developer) API security","text":"We've included a number of sample x-kuadrant
extensions in the OAS spec already:
- At the top-level of our spec, we've defined an
x-kuadrant
extension to detail the Gateway API Gateway associated with our app:
x-kuadrant:\n route:\n name: petstore\n namespace: petstore\n labels:\n deployment: petstore\n owner: cferreir\n hostnames:\n\n - petstore.$KUADRANT_ZONE_ROOT_DOMAIN\n parentRefs:\n - name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n kind: Gateway\n
- In
/user/login
, we have a Gateway API backendRef
set and a rate_limit
set. The rate limit policy for this endpoint restricts usage of this endpoint to 2 requests in a 10 second window: x-kuadrant:\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n - limit: 2\n duration: 10\n unit: second\n
- In
/store/inventory
, we have also have a Gateway API backendRef
set and a rate_limit
set. The rate limit policy for the endpoint restricts usage of this endpoint to 10 requests in a 10 second window: x-kuadrant:\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n - limit: 10\n duration: 10\n unit: second\n
- Finally, we have a
securityScheme
setup for apiKey auth, powered by Authorino. We'll show this in more detail a little later: securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
These extensions allow us to automatically generate Kuadrant Kubernetes resources, including AuthPolicies, RateLimitPolicies and Gateway API resources such as HTTPRoutes.
"},{"location":"api-quickstart/#kuadrantctl","title":"kuadrantctl","text":"kuadrantctl
is a CLI that supports generating various Kubernetes resources from OAS specs. Let's run some commands to generate some of these resources. If you forked the api-petstore repo, you can also check them in. We'll then apply these to our running workload to implement rate limiting and auth.
"},{"location":"api-quickstart/#installing-kuadrantctl","title":"Installing kuadrantctl
","text":"Download kuadrantctl
from the v0.2.0
release artifacts:
https://github.com/Kuadrant/kuadrantctl/releases/tag/v0.2.0
Drop the kuadrantctl
binary somewhere into your $PATH (e.g. /usr/local/bin/
).
For this next part of the tutorial, we recommend installing yq
to pretty-print YAML resources.
"},{"location":"api-quickstart/#generating-kuadrant-resources-with-kuadrantctl","title":"Generating Kuadrant resources with kuadrantctl
","text":"We'll generate an AuthPolicy
to implement API key auth, per the securityScheme
in our OAS spec:
# Generate this resource and save:\nkuadrantctl generate kuadrant authpolicy --oas openapi.yaml | yq -P | tee resources/authpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/authpolicy.yaml\n
Next we'll generate a RateLimitPolicy
, to protect our APIs with the limits we have setup in our OAS spec:
# Generate this resource and save:\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | yq -P | tee resources/ratelimitpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/ratelimitpolicy.yaml\n
Lastly, we'll generate a Gateway API HTTPRoute
to service our APIs:
# Generate this resource and save:\nkuadrantctl generate gatewayapi httproute --oas openapi.yaml | yq -P | tee resources/httproute.yaml\n\n# Apply this resource to our cluster, setting the hostname in via the KUADRANT_ZONE_ROOT_DOMAIN env var:\nkustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-1 apply -f-\n
"},{"location":"api-quickstart/#check-our-applied-policies","title":"Check our applied policies","text":"Navigate to your app's Swagger UI:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/docs/\n
"},{"location":"api-quickstart/#ratelimitpolicy","title":"RateLimitPolicy","text":"Let's check that our RateLimitPolicy
for the /store/inventory
endpoint has been applied and works correctly. Recall that our OAS spec had the following limits applied:
x-kuadrant:\n ...\n rate_limit:\n rates:\n\n - limit: 10\n duration: 10\n unit: second\n
Navigate to the /store/inventory
API, click Try it out
, and Execute
. You'll see a response similar to:
{\n \"available\": 10,\n \"pending\": 5,\n \"sold\": 3\n}\n
This API has a rate limit applied, so if you send more than 10 requests in a 10 second window, you will see a 429
HTTP status code in responses, and a \"Too Many Requests\" message in the response body. Click Execute
quickly in succession to see your RateLimitPolicy
in action.
"},{"location":"api-quickstart/#authpolicy","title":"AuthPolicy","text":"Let's check that our AuthPolicy
for the /store/admin
endpoint has been applied and works correctly. Recall that our OAS spec had the following securitySchemes applied:
securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
Navigate to the /store/admin
API, click Try it out
, and Execute
. You'll get a 401 response.
You can set a value for the api_key
header by clicking Authorize
at the top of the page. Set a value of secret
. This API key value is stored in the petstore-api-key
Secret in the petstore
namespace. Try the /store/admin
endpoint again and you should get a 200 response with the following:
{\"message\":\"You are an admin!\"}\n
"},{"location":"api-quickstart/#policy-adjustments","title":"Policy Adjustments","text":"Run the Swagger UI editor to explore the OAS spec and make some tweaks:
docker run -p 8080:8080 -v $(pwd):/tmp -e SWAGGER_FILE=/tmp/openapi.yaml swaggerapi/swagger-editor\n
You should be able to access the Swagger Editor at http://localhost:8080. Our /store/inventory
API needs some additional rate limiting. This is one of our slowest, most expensive services, so we'd like to rate limit it further.
In your openapi.yaml
, navigate to the /store/inventory
endpoint in the paths
block. Modify the rate_limit block to further restrict the number of requests this endpoint can serve to 2 requests per 10 seconds:
x-kuadrant:\n ...\n rate_limit:\n rates:\n\n - limit: 2\n duration: 10\n unit: second\n
Save your updated spec - File
> Save as YAML
> and update your existing openapi.yaml
. You may need to copy the file from your Downloads folder to the location of the petstore repository.
Next we'll re-generate our RateLimitPolicy
with kuadrantctl
:
# Generate this resource and save:\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | yq -P | tee resources/ratelimitpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/ratelimitpolicy.yaml\n
At this stage you can optionally check in all the changes to the repo if you forked it.
# Optionally add, commit & push the changes to your fork\ngit add resources\ngit commit -am \"Generated AuthPolicy,RateLimitPolicy & HTTPRoute\"\ngit push # You may need to set an upstream as well\n
In your app's Swagger UI:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/docs/\n
Navigate to the /store/inventory
API once more, click Try it out
, and Execute
.
You'll see the effects of our new RateLimitPolicy
applied. If you now send more than 2 requests in a 10 second window, you'll be rate-limited.
Note: It may take a few minutes for the updated RateLimitPolicy to be configured with the modified rate limit.
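To confirm that the updated policy has been accepted and enforced, you can inspect its status conditions (a quick check; assumes yq is still installed):
kubectl --context kind-api-workload-1 get ratelimitpolicy -n petstore -o yaml | yq '.items[].status.conditions'\n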
"},{"location":"api-quickstart/#application-developer-scaling-the-application","title":"(Application developer) Scaling the application","text":"Deploy the petstore to the 2nd cluster:
kustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-2 apply -f-\nkubectl --context kind-api-workload-2 apply -f ./resources/authpolicy.yaml\nkubectl --context kind-api-workload-2 apply -f ./resources/ratelimitpolicy.yaml\n
Configure the app REGION
to be us
:
kubectl --context kind-api-workload-2 apply -k ./resources/us-cluster/\n
"},{"location":"api-quickstart/#platform-engineer-scaling-the-gateway-and-traffic-management","title":"(Platform engineer) Scaling the gateway and traffic management","text":"Deploy the Gateway to the 2nd cluster:
kubectl --context kind-api-control-plane patch placement http-gateway --namespace multi-cluster-gateways --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/numberOfClusters\", \"value\":2}]'\n
Label the 1st cluster as being in the 'EU' region, and the 2nd cluster as being in the 'US' region. These labels are used by the DNSPolicy for configuring geo DNS.
kubectl --context kind-api-control-plane label managedcluster kind-api-workload-1 kuadrant.io/lb-attribute-geo-code=EU --overwrite\nkubectl --context kind-api-control-plane label managedcluster kind-api-workload-2 kuadrant.io/lb-attribute-geo-code=US --overwrite\n
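You can verify the geo labels were applied before moving on (a quick check):
kubectl --context kind-api-control-plane get managedclusters --show-labels\n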
"},{"location":"api-quickstart/#api-consumer-accessing-the-api-from-multiple-regions","title":"(API consumer) Accessing the API from multiple regions","text":"Info
This section is optional. If you'd rather skip this part, you can skip forward to the \"(App developer) API traffic monitoring\" section.
"},{"location":"api-quickstart/#pre-requisites","title":"Pre-requisites","text":" python3
and pip3
: these are required for this part of the walkthrough
To demonstrate traffic management by geographical region, we'll use a tool called 'geosight'. This tool resolves hostnames from different regions, fetches a website from the resulting DNS record address and takes a screenshot. The petstore app has been configured to serve a flag image based on which region it is running in. In the 1st cluster, the EU flag is used. In the 2nd cluster, the US flag is used.
To install 'geosight', run the following commands:
git clone git@github.com:jasonmadigan/geosight.git && cd geosight\npip3 install -r requirements.txt\nplaywright install\n
Then run it using:
python3 app.py\n
Access the webapp at http://127.0.0.1:5001/. In the input box, type the address from below and click the Fetch
button:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/server/details\n
After a moment you should see DNS results for different regions, and a corresponding screenshot.
If you want to experiment with other regions, check out the Configuration section for geosight and the Kuadrant docs for geo load balancing.
"},{"location":"api-quickstart/#app-developer-api-traffic-monitoring","title":"(App developer) API traffic monitoring","text":"To view the App developer dashboard, the same Grafana will be used from the platform engineer steps above: https://grafana.172.31.0.2.nip.io
The most relevant for an app developer is the Stitch: App Developer Dashboard
You should see panels about APIs, including:
- Request and error rates
- API summaries
- API request summaries
- API duration
All of these correspond to our HTTPRoute generated from our OAS spec.
"},{"location":"api-quickstart/#platform-engineer-apis-summary-view","title":"(Platform Engineer) APIs summary view","text":"Now that the app developer has deployed their app, new metrics and data is now available in the platform engineer dashboard seen in the previous step https://grafana.172.31.0.2.nip.io
:
- Gateways, routes and policies
- Constraints & Violations (there should be no violations present)
- APIs Summary
"},{"location":"api-quickstart/#summary","title":"Summary","text":"You now have a local environment with a reference architecture to design and deploy an API in a kube native way, using Kuadrant and other open source tools.
"},{"location":"api-quickstart/#cleanup","title":"Cleanup","text":"To destroy the previously created kind
clusters, run:
./cleanup.sh\n
Info
DNS records in AWS will remain after cleanup - you can remove these from your zone manually.
"},{"location":"kuadrantctl/","title":"kuadrantctl","text":"kuadrantctl
is a CLI tool for managing Kuadrant configurations and resources.
"},{"location":"kuadrantctl/#installing","title":"Installing","text":"kuadrantctl
can be installed either by downloading pre-compiled binaries or by compiling from source. For most users, downloading the binary is the easiest and recommended method.
"},{"location":"kuadrantctl/#installing-pre-compiled-binaries","title":"Installing Pre-compiled Binaries","text":" - Download the latest binary for your platform from the
kuadrantctl
Releases page. - Unpack the binary.
- Move it to a directory in your
$PATH
so that it can be executed from anywhere.
"},{"location":"kuadrantctl/#compiling-from-source","title":"Compiling from Source","text":"If you prefer to compile from source or are contributing to the project, you can install kuadrantctl
using make install
. This method requires Golang 1.21 or newer.
It is possible to use the make target install
to compile from source. From root of the repository, run
make install\n
This will compile kuadrantctl
and install it in the bin
directory at root of directory. It will also ensure the correct version of the binary is displayed . It can be ran using ./bin/kuadrantctl
.
"},{"location":"kuadrantctl/#usage","title":"Usage","text":"Below is a high-level overview of its commands, along with links to detailed documentation for more complex commands.
"},{"location":"kuadrantctl/#general-syntax","title":"General Syntax","text":"kuadrantctl [command] [subcommand] [flags]\n
"},{"location":"kuadrantctl/#commands-overview","title":"Commands Overview","text":"Command Description completion
Generate autocompletion scripts for the specified shell generate
Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications help
Help about any command version
Print the version number of kuadrantctl
"},{"location":"kuadrantctl/#flags","title":"Flags","text":"Flag Description -h
, --help
Help for kuadrantctl
-v
, --verbose
Enable verbose output"},{"location":"kuadrantctl/#commands-detail","title":"Commands Detail","text":""},{"location":"kuadrantctl/#completion","title":"completion
","text":"Generate an autocompletion script for the specified shell.
Subcommand Description bash
Generate script for Bash fish
Generate script for Fish powershell
Generate script for PowerShell zsh
Generate script for Zsh"},{"location":"kuadrantctl/#generate","title":"generate
","text":"Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications.
Subcommand Description gatewayapi
Generate Gateway API resources kuadrant
Generate Kuadrant resources"},{"location":"kuadrantctl/#generate-gatewayapi","title":"generate gatewayapi
","text":"Generate Gateway API resources from an OpenAPI 3.x specification
Subcommand Description Flags httproute
Generate Gateway API HTTPRoute from OpenAPI 3.0.X --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#generate-kuadrant","title":"generate kuadrant
","text":"Generate Kuadrant resources from an OpenAPI 3.x specification
Subcommand Description Flags authpolicy
Generate a Kuadrant AuthPolicy from an OpenAPI 3.0.x specification --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\") ratelimitpolicy
Generate Kuadrant RateLimitPolicy from an OpenAPI 3.0.x specification --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#version","title":"version
","text":"Print the version number of kuadrantctl
.
No additional flags or subcommands.
"},{"location":"kuadrantctl/#additional-guides","title":"Additional Guides","text":""},{"location":"kuadrantctl/#generating-gateway-api-httproute-objects","title":"Generating Gateway API HTTPRoute Objects","text":" - Generates Gateway API HTTPRoute objects from an OpenAPI Specification (OAS) 3.x.
- Supports reading from a file, URL, or stdin.
- Example usages and more information can be found in the detailed guide.
"},{"location":"kuadrantctl/#generating-kuadrant-authpolicy-objects","title":"Generating Kuadrant AuthPolicy Objects","text":" - Generates Kuadrant AuthPolicy objects for managing API authentication.
- Supports
openIdConnect
and apiKey
types from the OpenAPI Security Scheme Object. - Example usages and more information can be found in the detailed guide.
"},{"location":"kuadrantctl/#generating-kuadrant-ratelimitpolicy-objects","title":"Generating Kuadrant RateLimitPolicy Objects","text":" - Generates Kuadrant RateLimitPolicy objects for managing API rate limiting.
- Supports reading from a file, URL, or stdin.
- Example usages and more information can be found in the detailed guide.
For more detailed information about each command, including options and usage examples, use kuadrantctl [command] --help
.
"},{"location":"kuadrantctl/#using-with-github-actions","title":"Using with GitHub Actions","text":"- name: Install kuadrantctl\n uses: jaxxstorm/action-install-gh-release@v1.10.0\n with: # Grab the latest version\n repo: Kuadrant/kuadrantctl\n
"},{"location":"kuadrantctl/#commands","title":"Commands","text":" - Generate Gateway API HTTPRoute objects from OpenAPI 3.X
- Generate Kuadrant RateLimitPolicy from OpenAPI 3.X
- Generate Kuadrant AuthPolicy from OpenAPI 3.X
"},{"location":"kuadrantctl/#contributing","title":"Contributing","text":"The Development guide describes how to build the kuadrantctl CLI and how to test your changes before submitting a patch or opening a PR.
"},{"location":"kuadrantctl/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"kuadrantctl/doc/development/","title":"Development Guide","text":""},{"location":"kuadrantctl/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - git
- go version 1.21+
"},{"location":"kuadrantctl/doc/development/#build-the-cli","title":"Build the CLI","text":"$ git clone https://github.com/kuadrant/kuadrantctl.git\n$ cd kuadrantctl && make install\n$ bin/kuadrantctl version\n{\"level\":\"info\",\"ts\":\"2023-11-08T23:44:57+01:00\",\"msg\":\"kuadrantctl version: latest\"}\n
"},{"location":"kuadrantctl/doc/development/#quick-steps-to-contribute","title":"Quick steps to contribute","text":" - Fork the project.
- Download your fork to your PC (
git clone https://github.com/your_username/kuadrantctl && cd kuadrantctl
) - Create your feature branch (
git checkout -b my-new-feature
) - Make changes and run tests (
make test
) - Add them to staging (
git add .
) - Commit your changes (
git commit -m 'Add some feature'
) - Push to the branch (
git push origin my-new-feature
) - Create new pull request
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/","title":"Generating Gateway API HTTPRoutes","text":""},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#generate-gateway-api-httproute-object-from-openapi-3","title":"Generate Gateway API HTTPRoute object from OpenAPI 3","text":"The kuadrantctl generate gatewayapi httproute
command generates an Gateway API HTTPRoute from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the cli by one of the following channels:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from stdin standard input stream.
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#usage","title":"Usage","text":"$ kuadrantctl generate gatewayapi httproute -h\nGenerate Gateway API HTTPRoute from OpenAPI 3.0.X\n\nUsage:\n kuadrantctl generate gatewayapi httproute [flags]\n\nFlags:\n -h, --help help for httproute\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Under the example folder there are examples of OAS 3 that can be used to generate the resources
As an AuthPolicy and RateLimitPolicy both require a HTTPRoute to target, the user guides for generating those policies include examples of running the kuadrantctl generate gatewayapi httproute
command.
You can find those guides here:
- Generate Kuadrant AuthPolicy
- Generate Kuadrant RateLimitPolicy
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/","title":"Generating Kuadrant AuthPolicies","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#generate-kuadrant-authpolicy-object-from-openapi-3","title":"Generate Kuadrant AuthPolicy object from OpenAPI 3","text":"The kuadrantctl generate kuadrant authpolicy
command generates an Kuadrant AuthPolicy from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the cli by one of the following channels:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from stdin standard input stream.
OpenAPI Security Scheme Object types
Types Implemented openIdConnect
YES apiKey
YES http
NO oauth2
NO"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openidconnect-type-description","title":"openIdConnect
Type Description","text":"The following OAS example has one protected endpoint GET /dog
with openIdConnect
security scheme type.
paths:\n /dog:\n get:\n operationId: \"getDog\"\n security:\n\n - securedDog: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n securedDog:\n type: openIdConnect\n openIdConnectUrl: https://example.com/.well-known/openid-configuration\n
Running the command
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml | yq -P\n
The generated authpolicy (only relevan fields shown here):
kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\nspec:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v1/dog\n method: GET\n rules:\n authentication:\n getDog_securedDog:\n credentials: {}\n jwt:\n issuerUrl: https://example.com/.well-known/openid-configuration\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /api/v1/dog\n method: GET\n
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#apikey-type-description","title":"apiKey
Type Description","text":"The following OAS example has one protected endpoint GET /dog
with apiKey
security scheme type.
paths:\n /dog:\n get:\n operationId: \"getDog\"\n security:\n\n - securedDog: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n securedDog:\n type: apiKey\n name: dog_token\n in: query\n
Running the command
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml | yq -P\n
The generated authpolicy (only relevan fields shown here):
kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\nspec:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /dog\n method: GET\n rules:\n authentication:\n getDog_securedDog:\n credentials:\n queryString:\n name: dog_token\n apiKey:\n selector:\n matchLabels:\n kuadrant.io/apikeys-by: securedDog\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /dog\n method: GET\n
In this particular example, the endpoint GET /dog
will be protected. The token needs to be in the query string of the request included in a parameter named dog_token
. Kuadrant will validate received tokens against tokens found in kubernetes secrets with label kuadrant.io/apikeys-by: ${sec scheme name}
. In this particular example the label selector will be: kuadrant.io/apikeys-by: securedDog
.
Like the following example:
apiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: securedDog\nstringData:\n api_key: MYSECRETTOKENVALUE\ntype: Opaque\n
Note: Kuadrant validates tokens against api keys found in secrets. The label selector format kuadrant.io/apikeys-by: ${sec scheme name}
is arbitrary and designed for this CLI command.
For more information about Kuadrant auth based on api key: https://docs.kuadrant.io/authorino/docs/user-guides/api-key-authentication/
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#usage","title":"Usage","text":"Generate Kuadrant AuthPolicy from OpenAPI 3.0.X\n\nUsage:\n kuadrantctl generate kuadrant authpolicy [flags]\n\nFlags:\n -h, --help help for authpolicy\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Under the example folder there are examples of OAS 3 that can be used to generate the resources
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#user-guide","title":"User Guide","text":"The verification steps will lead you to the process of deploying and testing the following api with endpoints protected using different security schemes:
Operation Security Scheme GET /api/v1/cat
public (not auth) POST /api/v1/cat
ApiKey in header GET /api/v1/dog
OpenIdConnect GET /api/v1/snake
OpenIdConnect OR ApiKey in query string - [Optional] Set up an SSO service supporting OIDC. For this example, we will be using Keycloak.
- Create a new realm
petstore
- Create a client
petstore
. In the Client Protocol field, select openid-connect
. - Configure the client settings: set Access Type to public and Direct Access Grants Enabled to ON (for this example, the password will be used directly to generate the token).
- Add a user to the realm
- Click the Users menu on the left side of the window. Click Add user.
- Type the username
bob
, set the Email Verified switch to ON, and click Save. - On the Credentials tab, set the password
p
. Enter the password in both the fields, set the Temporary switch to OFF to avoid the password reset at the next login, and click Set Password
.
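If you prefer to script the Keycloak setup above rather than use the admin console, a rough equivalent with Keycloak's kcadm.sh admin CLI could look like the following (a sketch only; it assumes admin credentials admin/admin and a Keycloak deployment serving under /auth):
kcadm.sh config credentials --server https://${KEYCLOAK_PUBLIC_DOMAIN}/auth --realm master --user admin --password admin\nkcadm.sh create realms -s realm=petstore -s enabled=true\nkcadm.sh create clients -r petstore -s clientId=petstore -s publicClient=true -s directAccessGrantsEnabled=true\nkcadm.sh create users -r petstore -s username=bob -s enabled=true -s emailVerified=true\nkcadm.sh set-password -r petstore --username bob --new-password p\n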
Now, let's run a local cluster to test the new kuadrantctl command to generate an AuthPolicy.
- Clone the repo
git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n
- Setup a cluster, Istio and Gateway API CRDs and Kuadrant
Use our single-cluster quick start script - this will install Kuadrant in a local kind
cluster: https://docs.kuadrant.io/getting-started-single-cluster/
- Build and install CLI in
bin/kuadrantctl
path
make install\n
- Deploy petstore backend API
kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n
- Let's create Petstore's OpenAPI spec
cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.1.0\"\ninfo:\n title: \"Pet Store API\"\n version: \"1.0.0\"\nx-kuadrant:\n route:\n name: \"petstore\"\n namespace: \"petstore\"\n hostnames:\n\n - example.com\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\nservers:\n - url: https://example.io/api/v1\npaths:\n /cat:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # No sec requirements\n operationId: \"getCat\"\n responses:\n 405:\n description: \"invalid input\"\n post: # API key\n operationId: \"postCat\"\n security:\n - cat_api_key: []\n responses:\n 405:\n description: \"invalid input\"\n /dog:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # OIDC\n operationId: \"getDog\"\n security:\n - oidc:\n - read:dogs\n responses:\n 405:\n description: \"invalid input\"\n /snake:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # OIDC or API key\n operationId: \"getSnake\"\n security:\n - oidc: [\"read:snakes\"]\n - snakes_api_key: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n cat_api_key:\n type: apiKey\n name: api_key\n in: header\n oidc:\n type: openIdConnect\n openIdConnectUrl: https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore\n snakes_api_key:\n type: apiKey\n name: snake_token\n in: query\nEOF\n
Replace ${KEYCLOAK_PUBLIC_DOMAIN}
with your SSO instance domain
-
Create an API key only valid for POST /api/v1/cat
endpoint
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: cat-api-key-1\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: cat_api_key\nstringData:\n api_key: I_LIKE_CATS\ntype: Opaque\nEOF\n
Note: the label's value of kuadrant.io/apikeys-by: cat_api_key
is the name of the sec scheme of the OpenAPI spec.
-
Create an API key only valid for GET /api/v1/snake
endpoint
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: snake-api-key-1\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: snakes_api_key\nstringData:\n api_key: I_LIKE_SNAKES\ntype: Opaque\nEOF\n
Note: the label's value of kuadrant.io/apikeys-by: snakes_api_key
is the name of the sec scheme of the OpenAPI spec.
- Create the HTTPRoute using the CLI
bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
- Create Kuadrant's Auth Policy
bin/kuadrantctl generate kuadrant authpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
Now, we are ready to test OpenAPI endpoints
GET /api/v1/cat
-> It's a public endpoint, hence should return 200 Ok
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/cat\"\n
POST /api/v1/cat
-> It's a protected endpoint with apikey
Without any credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"credential not found\"}\ndate: Tue, 28 Nov 2023 22:28:44 GMT\nserver: istio-envoy\ncontent-length: 0\n
The reason headers tell that credential not found
. Credentials satisfying postCat_cat_api_key
authentication is needed.
According to the OpenAPI spec, it should be a header named api_key
. What if we try a wrong token? one token assigned to other endpoint, i.e. I_LIKE_SNAKES
instead of the valid one I_LIKE_CATS
. It should return 401 Unauthorized
.
curl -H \"Host: example.com\" -H \"api_key: I_LIKE_SNAKES\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"the API Key provided is invalid\"}\ndate: Tue, 28 Nov 2023 22:32:55 GMT\nserver: istio-envoy\ncontent-length: 0\n
The reason headers tell that the API Key provided is invalid
. Using valid token (from the secret cat-api-key-1
assigned to POST /api/v1/cats
) in the api_key
header should return 200 Ok
curl -H \"Host: example.com\" -H \"api_key: I_LIKE_CATS\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
GET /api/v1/dog
-> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore
realm)
without credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/dog\"\n
To get the authentication token, this example is using Direct Access Grants oauth2 grant type (also known as Client Credentials grant type). When configuring the Keycloak (OIDC provider) client settings, we enabled Direct Access Grants to enable this procedure. We will be authenticating as bob
user with p
password. We previously created bob
user in Keycloak in the petstore
realm.
export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=petstore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore/protocol/openid-connect/token\" | jq -r '.access_token')\n
Replace ${KEYCLOAK_PUBLIC_DOMAIN}
with your SSO instance domain
With the access token in place, let's try to get those puppies
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/dog -i\n
it should return 200 OK
GET /api/v1/snake
-> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore
realm) OR with apiKey
This example is to show that multiple security requirements (with OR semantics) can be specified for an OpenAPI operation.
Without credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/snake\"\n
With the access token in place, it should return 200 OK (unless the token has expired).
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/snake -i\n
With apiKey it should also work. According to the OpenAPI spec security scheme, it should be a query string named snake_token
and the token needs to be valid token (from the secret snake-api-key-1
assigned to GET /api/v1/snake
)
curl -H 'Host: example.com' -i \"http://127.0.0.1:9080/api/v1/snake?snake_token=I_LIKE_SNAKES\"\n
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/","title":"Generating Kuadrant RateLimitPolicies","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#generate-kuadrant-ratelimitpolicy-object-from-openapi-3","title":"Generate Kuadrant RateLimitPolicy object from OpenAPI 3","text":"The kuadrantctl generate kuadrant ratelimitpolicy
command generates a Kuadrant RateLimitPolicy from your OpenAPI Specification (OAS) 3.x document powered with Kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the Kuadrant CLI in one of the following ways:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from
stdin
standard input stream.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#usage","title":"Usage","text":"Generate Kuadrant RateLimitPolicy from OpenAPI 3.0.x\n\nUsage:\n kuadrantctl generate kuadrant ratelimitpolicy [flags]\n\nFlags:\n -h, --help help for ratelimitpolicy\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Note: The kuadrantctl/examples
directory in GitHub includes sample OAS 3 files that you can use to generate the resources.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#procedure","title":"Procedure","text":" -
Clone the Git repository as follows:
git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n
-
Set up a cluster, Istio and Gateway API CRDs, and Kuadrant as follows: Use the single-cluster quick start script to install Kuadrant in a local kind
cluster: https://docs.kuadrant.io/getting-started-single-cluster/.
-
Build and install the CLI in the bin/kuadrantctl
path as follows:
make install\n
-
Deploy the Petstore backend API as follows:
kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n
-
Create the Petstore OpenAPI definition as follows:
cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.0.3\"\ninfo:\n title: \"Pet Store API\"\n version: \"1.0.0\"\nx-kuadrant: ## Root-level Kuadrant extension\n route:\n name: \"petstore\"\n namespace: \"petstore\"\n hostnames:\n\n - example.com\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\nservers:\n - url: https://example.io/v1\npaths:\n /cat:\n x-kuadrant: ## Path-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit:\n rates:\n - limit: 1\n duration: 10\n unit: second\n counters:\n - request.headers.x-forwarded-for\n get: # Added to the route and rate limited\n operationId: \"getCat\"\n responses:\n 405:\n description: \"invalid input\"\n post: # NOT added to the route\n x-kuadrant: \n disable: true\n operationId: \"postCat\"\n responses:\n 405:\n description: \"invalid input\"\n /dog:\n get: # Added to the route and rate limited\n x-kuadrant: ## Operation-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit:\n rates:\n - limit: 3\n duration: 10\n unit: second\n counters:\n - request.headers.x-forwarded-for\n operationId: \"getDog\"\n responses:\n 405:\n description: \"invalid input\"\n post: # Added to the route and NOT rate limited\n x-kuadrant: ## Operation-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n operationId: \"postDog\"\n responses:\n 405:\n description: \"invalid input\"\nEOF\n
Note: The servers
base path is not included. This is work-in-progress in follow-up PRs.
Operation Applied configuration GET /cat
Should return 200 OK and be rate limited (1 req / 10 seconds). POST /cat
Not added to the HTTPRoute. Should return 404 Not Found. GET /dog
Should return 200 OK and be rate limited (3 req / 10 seconds). POST /dog
Should return 200 OK and NOT rate limited. -
Create the HTTPRoute by using the CLI as follows:
bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
-
Create the rate limit policy as follows:
bin/kuadrantctl generate kuadrant ratelimitpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
-
Test the OpenAPI endpoints as follows:
-
GET /cat
- Should return 200 OK and be rate limited (1 req / 10 seconds).
curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/cat\"\n
POST /cat
- Not added to the HTTPRoute. Should return 404 Not Found.
curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/cat\"\n
GET /dog
- Should return 200 OK and be rate limited (3 req / 10 seconds).
curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/dog\"\n
POST /dog
- Should return 200 OK and NOT rate limited.
curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/dog\"\n
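To observe a limit actually being enforced, you can send a short burst of requests to a rate-limited endpoint; once the limit is exceeded, the extra requests are expected to be rejected (typically with a 429 status, though it is worth confirming the exact behavior against your own setup):
for i in 1 2 3; do curl --resolve example.com:9080:127.0.0.1 -s -o /dev/null -w '%{http_code} ' \"http://example.com:9080/cat\"; done\n# With the 1 req / 10 seconds limit on GET /cat, something like \"200 429 429\" is the expected shape of the output.\n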
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/","title":"kuadrantctl - CI/CD with Tekton and Argo CD","text":"This guide demonstrates setting up a CI/CD pipeline by using Tekton to deploy Kubernetes Gateway API and Kuadrant resources generated by kuadrantctl
, from an OpenAPI definition. In this example, these resources are applied directly to the cluster where Tekton is running.
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#prerequisites","title":"Prerequisites","text":" - Kuadrant, and all of its prerequisites, installed on a Kubernetes or OpenShift cluster.
- Tekton Pipelines installed on your cluster.
kubectl
configured with access to communicate with your cluster. - Optional: Tekton CLI
tkn
for easier interaction with Tekton resources.
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-1-set-up-your-namespace","title":"Step 1 - Set up your namespace","text":"Create a dedicated namespace as follows:
kubectl create namespace petstore\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-2-create-a-persistent-volume-claim","title":"Step 2 - Create a Persistent Volume Claim","text":"For this example, to store associated Tekton build artifacts, create a Persistent Volume Claim (PVC) in the petstore
namespace as follows:
kubectl apply -n petstore -f - <<EOF\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: tekton-kuadrantctl-pvc\n namespace: petstore\nspec:\n accessModes:\n\n - ReadWriteOnce\n resources:\n requests:\n storage: 1Gi\nEOF\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-3-define-the-tekton-task","title":"Step 3 - Define the Tekton Task","text":"Define the task that outlines steps to clone a repository, generate Kuadrant and Kubernetes resources by using kuadrantctl
, and apply them directly to the cluster as follows:
kubectl apply -f - <<'EOF'\napiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\n name: run-kuadrantctl\n namespace: petstore\nspec:\n params:\n\n - name: gitRepoUrl\n description: URL of the git repository to clone\n - name: gitRevision\n description: Git revision to checkout (branch, tag, sha)\n workspaces:\n - name: source\n description: Workspace to checkout the git repo\n - name: kubeconfig\n description: Workspace containing kubeconfig for Kubernetes cluster access\n steps:\n - name: clean-workspace\n image: alpine:latest\n script: |\n sh -c 'rm -rf $(workspaces.source.path)/* $(workspaces.source.path)/.[!.]* $(workspaces.source.path)/..?*'\n - name: clone\n image: alpine/git:latest\n script: |\n git clone $(params.gitRepoUrl) $(workspaces.source.path)\n cd $(workspaces.source.path)\n git checkout $(params.gitRevision)\n - name: download-kuadrantctl\n image: curlimages/curl:latest\n script: |\n ARCH=$(uname -m)\n case $ARCH in\n x86_64) BIN_ARCH=\"amd64\";;\n arm64) BIN_ARCH=\"arm64\";;\n aarch64) BIN_ARCH=\"arm64\";;\n *) echo \"Unsupported architecture: $ARCH\" && exit 1 ;;\n esac\n cd $(workspaces.source.path)\n curl -LO \"https://github.com/Kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\"\n tar -xzf kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\n - name: run-kuadrantctl\n image: alpine:latest\n script: |\n cd $(workspaces.source.path)\n mkdir -p generated-resources\n ./kuadrantctl generate kuadrant authpolicy --oas openapi.yaml | tee generated-resources/authpolicy.yaml\n ./kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | tee generated-resources/ratelimitpolicy.yaml\n ./kuadrantctl generate gatewayapi httproute --oas openapi.yaml | tee generated-resources/httproute.yaml\n - name: apply-resources\n image: bitnami/kubectl\n script: |\n cd $(workspaces.source.path)\n export KUADRANT_ZONE_ROOT_DOMAIN=example.com # domain name used in the HTTPRoute for the petstore sample app\n for file in ./generated-resources/*.yaml; do\n envsubst < \"$file\" | kubectl apply -n petstore -f - \n done\nEOF\n
Note: This example uses Tekton with kubectl
to apply resources to a cluster. It is best to use a tool such as Argo CD to implement continuous delivery by using a GitOps approach. In this scenario, you would do the following:
- Use
kuadrantctl
to generate Kubernetes and Kuadrant resources as part of a Tekton pipeline. - Commit these new resources to a Git repository.
- Use Argo CD to sync these changes from the Git repository to a Kubernetes or OpenShift cluster, as in the sketch below.
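As a minimal sketch of the Argo CD piece of that flow, an Application resource could point at the repository and path where the pipeline commits its output. The repoURL and path values below are placeholders, not part of this guide:
kubectl apply -f - <<EOF\napiVersion: argoproj.io/v1alpha1\nkind: Application\nmetadata:\n name: petstore-generated-resources\n namespace: argocd\nspec:\n project: default\n source:\n repoURL: https://github.com/example/gitops-repo.git # placeholder repository\n targetRevision: main\n path: generated-resources # placeholder path where the pipeline commits its output\n destination:\n server: https://kubernetes.default.svc\n namespace: petstore\n syncPolicy:\n automated: {}\nEOF\n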
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-4-create-a-kubeconfig-secret","title":"Step 4 - Create a Kubeconfig secret","text":"Important: While this guide uses a kubeconfig
secret for simplicity, do not use this in production environments. Instead, use a service account for enhanced security.
This example uses a kubeconfig
secret and role bindings to demonstrate how to provide access for pushing generated resources to a cluster. For production setups, however, a dedicated service account is the better choice, as sketched below.
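As a sketch of that production-oriented alternative, you could create a dedicated service account and bind it to a suitably scoped role instead of mounting a kubeconfig; the names below are illustrative, and the edit cluster role is shown only as a starting point:
kubectl create serviceaccount kuadrant-ci -n petstore\n# Bind a role scoped to what the pipeline actually needs; \"edit\" is illustrative:\nkubectl create rolebinding kuadrant-ci-edit --clusterrole=edit --serviceaccount=petstore:kuadrant-ci -n petstore\n
You would then reference this service account from the TaskRun through its serviceAccountName field rather than mounting a kubeconfig workspace.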
To proceed, create a kubeconfig
secret in the petstore
namespace to provide Tekton with access to your Kubernetes cluster as follows:
kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=/path/to/.kube/config -n petstore\n
Create an associated ClusterRole
and ClusterRoleBinding
as follows:
kubectl apply -n petstore -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: kuadrant-ci-example-full-access\nrules:\n\n- apiGroups: [\"*\"]\n resources: [\"*\"]\n verbs: [\"*\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: kuadrant-ci-example-full-access-binding\nsubjects:\n- kind: ServiceAccount\n name: default\n namespace: petstore\nroleRef:\n kind: ClusterRole\n name: kuadrant-ci-example-full-access\n apiGroup: rbac.authorization.k8s.io\nEOF\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-5-trigger-the-taskrun","title":"Step 5 - Trigger the TaskRun","text":"Execute the task from the petstore
namespace, referencing the kubeconfig
secret for cluster access as follows:
This example runs the task against the Kuadrant Petstore app: https://github.com/kuadrant/api-petstore.
kubectl apply -n petstore -f - <<EOF\napiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\n name: run-kuadrantctl-taskrun\n namespace: petstore\nspec:\n taskRef:\n name: run-kuadrantctl\n params:\n\n - name: gitRepoUrl\n value: \"https://github.com/kuadrant/api-petstore.git\"\n - name: gitRevision\n value: \"main\"\n workspaces:\n - name: source\n persistentVolumeClaim:\n claimName: tekton-kuadrantctl-pvc\n - name: kubeconfig\n secret:\n secretName: kubeconfig-secret\nEOF\n
If you have tkn
installed, you can easily view the progress of the task run as follows:
tkn taskrun list -n petstore\nNAME STARTED DURATION STATUS\nrun-kuadrantctl-taskrun 12 seconds ago --- Running(Pending)\n
tkn taskrun logs -n petstore -f\n\n\n[clone] Cloning into '/workspace/source'...\n[clone] Already on 'main'\n[clone] Your branch is up to date with 'origin/main'.\n\n[download-kuadrantctl] % Total % Received % Xferd Average Speed Time Time Time Current\n[download-kuadrantctl] Dload Upload Total Spent Left Speed\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n100 21.4M 100 21.4M 0 0 6601k 0 0:00:03 0:00:03 --:--:-- 8756k\n\n[run-kuadrantctl] {\"kind\":\"AuthPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}],\"rules\":{\"authentication\":{\"storeAdmin_api_key\":{\"credentials\":{\"customHeader\":{\"name\":\"api_key\"}},\"apiKey\":{\"selector\":{\"matchLabels\":{\"kuadrant.io/apikeys-by\":\"api_key\"}}},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}]}}}},\"status\":{}}\n[run-kuadrantctl] {\"kind\":\"RateLimitPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"limits\":{\"getInventory\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":10,\"duration\":10,\"unit\":\"second\"}]},\"loginUser\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":2,\"duration\":10,\"unit\":\"second\"}]}}},\"status\":{}}\n[run-kuadrantctl] {\"kind\":\"HTTPRoute\",\"apiVersion\":\"gateway.networking.k8s.io/v1beta1\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"parentRefs\":[{\"kind\":\"Gateway\",\"namespace\":\"kuadrant-multi-cluster-gateways\",\"name\":\"prod-web\"}],\"hostnames\":[\"petstore.${KUADRANT_ZONE_ROOT_DOMAIN}\"],\"rules\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]}]},\"status\":{\"parents\":null}}\n\n[apply-resources] authpolicy.kuadrant.io/petstore created\n[apply-resources] httproute.gateway.networking.k8s.io/petstore created\n[apply-resources] ratelimitpolicy.kuadrant.io/petstore created\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-6-cleanup","title":"Step 6 - Cleanup","text":"Clean up your resources as follows:
- Remove the
petstore
namespace: kubectl delete ns petstore
- Remove the
ClusterRole
and ClusterRoleBinding
: kubectl delete clusterrole kuadrant-ci-example-full-access
kubectl delete clusterrolebinding kuadrant-ci-example-full-access-binding
"},{"location":"kuadrantctl/doc/openapi-apicurio/","title":"Using Apicurio Studio with Kuadrant OAS extensions","text":"You can use OpenAPI extensions to define extra functionality beyond what is covered by the standard OpenAPI specification. Extensions typically start with the x-
prefix, for example, x-codegen
. Kuadrant OpenAPI extensions start with the x-kuadrant
prefix, and allow you to configure Kuadrant policy information alongside your API.
Apicurio Studio is a UI tool for visualizing and editing OpenAPI designs and definitions, which can visualize security details and custom extensions specified in your OpenAPI definition.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#prerequisites","title":"Prerequisites","text":" - You have Apicurio Studio installed and running. For more information, see the Apicurio Studio documentation.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-apicurio/#step-1-access-your-openapi-definition-in-apicurio-studio","title":"Step 1 - Access your OpenAPI definition in Apicurio Studio","text":"Open or import your OpenAPI definition in Apicurio Studio. On the Design tab, select the VENDOR-EXTENSiONS section to add an extension. Alternatively, you can use the Source tab to edit the API definition directly.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#step-2-add-kuadrant-extensions-to-your-api-definition","title":"Step 2 - Add Kuadrant extensions to your API definition","text":"The following configuration and extension points are supported by Apicurio Studio and the kuadrantctl
CLI:
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-http-route","title":"Generate an HTTP route","text":"To generate an HTTPRoute for the API, add the following x-kuadrant
block to your API definition in Apicurio Studio, replacing values to match your API details and the location of your Gateway:
x-kuadrant:\n route:\n name: petstore\n namespace: petstore\n hostnames:\n\n - 'petstore.example.com'\n parentRefs:\n - name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n kind: Gateway\n
For more details, see Generate Gateway API HTTPRoute object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"To generate an AuthPolicy, add a securityScheme
to the components
block in your API definition. The following securityScheme
requires that an API key header is set:
securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
Although securityScheme
is not an OpenAPI extension, it is used by kuadrantctl
like the other extensions mentioned in this document.
When added, Apicurio Studio displays the new scheme in the SECURITY SCHEMES section.
For more details, see Generate Kuadrant AuthPolicy object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"To generate a RateLimitPolicy for the API, add the following x-kuadrant
block to a path in your API definition, replacing values to match your API details.
paths:\n /:\n x-kuadrant:\n backendRefs:\n -\n name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n -\n limit: 10\n duration: 10\n unit: second\n
When added, Apicurio Studio displays the extension in the VENDOR-EXTENSIONS section for that specific path.
For more details, see Generate Kuadrant RateLimitPolicy object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#additional-resources","title":"Additional resources","text":" - OpenAPI 3.0.x Kuadrant Extensions in the kuadrantctl documentation.
- Apicurio Studio - Now with OpenAPI Vendor Extensions.
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/","title":"OpenAPI 3.0.x Kuadrant extensions","text":"This reference information shows examples of how to add Kuadrant extensions at the root, path, or operation level in an OpenAPI 3.0.x definition.
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#root-level-kuadrant-extension","title":"Root-level Kuadrant extension","text":"You can add a Kuadrant extension at the root level of an OpenAPI definition. The following example shows an extension added for a petstore
app:
x-kuadrant:\n route: ## HTTPRoute metadata\n name: \"petstore\"\n namespace: \"petstore\"\n labels: ## map[string]string\n deployment: petstore\n hostnames: ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n - example.com\n parentRefs: ## []gateway.networking.k8s.io/v1beta1.ParentReference\n - name: apiGateway\n namespace: gateways\n
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#path-level-kuadrant-extension","title":"Path-level Kuadrant extension","text":"You can add a Kuadrant extension at the path level of an OpenAPI definition. This configuration at the path level is the default when there is no operation-level configuration. The following example shows an extension added for a /cat
path:
paths:\n /cat:\n x-kuadrant: ## Path-level Kuadrant extension\n disable: true ## Remove from the HTTPRoute. Optional. Default: false\n pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact\n backendRefs: ## Backend references to be included in the HTTPRoute. []gateway.networking.k8s.io/v1beta1.HTTPBackendRef. Optional.\n\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit: ## Rate limit configuration. Optional.\n rates: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n - limit: 1\n duration: 10\n unit: second\n counters: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.ContextSelector\n - auth.identity.username\n when: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#operation-level-kuadrant-extension","title":"Operation-level Kuadrant extension","text":"You can add a Kuadrant extension at the operation level of an OpenAPI definition. This extension uses the same schema as the path-level Kuadrant extension. The following example shows an extension added for a get
operation:
paths:\n /cat:\n get:\n x-kuadrant: ## Operation-level Kuadrant extension\n disable: true ## Remove from the HTTPRoute. Optional. Default: path level \"disable\" value.\n pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact.\n backendRefs: ## Backend references to be included in the HTTPRoute. Optional.\n\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit: ## Rate limit configuration. Optional.\n rates: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n - limit: 1\n duration: 10\n unit: second\n counters: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.ContextSelector\n - auth.identity.username\n when: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/","title":"Integrating Kuadrant OAS extensions with Red Hat OpenShift Dev Spaces","text":"OpenAPI extensions enhance the standard OpenAPI specification by adding custom functionality. Kuadrant OpenAPI extensions are identified by the x-kuadrant
prefix. You can use OpenAPI extensions to integrate Kuadrant policies directly into your API definitions.
Red Hat OpenShift Dev Spaces provides a browser-based, cloud-native IDE that supports rapid and decentralized development in container-based environments. This tutorial demonstrates how to use OpenShift Dev Spaces to modify an OpenAPI definition by incorporating Kuadrant policies, and then use the kuadrantctl
CLI to create Kubernetes resources for both Gateway API and Kuadrant.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#prerequisites","title":"Prerequisites","text":" -
You must have access to one of the following Dev Spaces instances:
-
A self-hosted OpenShift Dev Spaces instance.
- An OpenShift Dev Spaces instance provided by the Red Hat Developer Sandbox.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-1-setting-up-your-workspace","title":"Step 1 - Setting up your workspace","text":"Create a workspace in Dev Spaces for your project as follows:
- Fork the following repository: https://github.com/Kuadrant/blank-petstore.
- In Dev Spaces, select Create Workspace, and enter the URL of your forked repository. For example:
https://github.com/<your-username>/blank-petstore.git
. - Click Create & Open.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-2-configuring-vs-code-in-dev-spaces","title":"Step 2 - Configuring VS Code in Dev Spaces","text":"For this tutorial, you will perform the following tasks:
- Install
kuadrantctl
in your workspace to demonstrate Kubernetes resource generation from your modified OpenAPI definition. - Optional: Configure Git with your username and email to enable pushing changes back to your repository.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#install-the-kuadrantctl-cli","title":"Install the kuadrantctl CLI","text":"To install kuadrantctl
in your Dev Spaces workspace, enter the following command:
curl -sL \"https://github.com/kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-amd64.tar.gz\" | tar xz -C /home/user/.local/bin\n
This command installs kuadrantctl
in /home/user/.local/bin
, which is included in the container's $PATH
by default.
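To confirm that the binary is available on your PATH, you can print its version; the version subcommand is assumed to be present in this release:
kuadrantctl version\n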
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#optional-configuring-git","title":"Optional: Configuring Git","text":"If you plan to push changes back to your repository, configure your Git username and email as follows:
git config --global user.email \"foo@example.com\"\ngit config --global user.name \"Foo Example\"\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-3-adding-kuadrant-policies-to-your-openapi-definition","title":"Step 3 - Adding Kuadrant policies to your OpenAPI definition","text":"After creating your workspace, Dev Spaces will launch VS Code loaded with your forked repository. Navigate to the openapi.yaml
file in the sample app to begin modifications.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#kuadrant-policies-overview","title":"Kuadrant policies overview","text":"You will enhance your API definition by applying Kuadrant policies to the following endpoints:
/pet/findByStatus
/user/login
/store/inventory
In this tutorial, you will add Kuadrant policies to your API definition as follows:
- Generate an
HTTPRoute
to expose these three routes for an existing Gateway
. - Add API key authentication for the
/user/login
route, using a Kuadrant AuthPolicy
and OAS securitySchemes
. - Add a Kuadrant
RateLimitPolicy
to the /store/inventory
endpoint, to limit the amount of requests this endpoint can receive.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#defining-a-gateway","title":"Defining a Gateway","text":"Use the x-kuadrant
extension in the root level to specify a Gateway
. This information will be used to generate HTTPRoute
s at the path level. For example:
x-kuadrant:\n route: ## HTTPRoute metadata\n name: \"petstore\"\n namespace: \"petstore\"\n labels: ## map[string]string\n deployment: petstore\n hostnames: ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n - example.com\n parentRefs: ## []gateway.networking.k8s.io/v1beta1.ParentReference\n - name: apiGateway\n namespace: gateways\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#specifying-httproutes-for-each-path","title":"Specifying HTTPRoutes for each path","text":"For each path, add an x-kuadrant
extension with backendRefs
to link your routes to your paths as follows:
/pet/findByStatus:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
/user/login:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
/store/inventory:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
Note: The x-kuadrant
extension at the path level applies to all HTTP methods defined in the path. For method-specific policies, move the extension inside the relevant HTTP method block, for example, get
or post
.
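For example, a sketch of the same extension scoped to only the GET operation of a path, reusing the backend reference shown above:
/pet/findByStatus:\n get:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n # ...\n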
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#implementing-authpolicy-and-security-schemes","title":"Implementing AuthPolicy and security schemes","text":"To secure the /user/login
endpoint with API key authentication, use the following configuration:
/user/login:\n # ...\n get:\n security:\n\n - api_key: []\n
components:\n schemas:\n # ...\n securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
This configuration generates an AuthPolicy
that references an API key stored in a labeled Secret
:
apiVersion: v1\nkind: Secret\nmetadata:\n name: petstore-api-key\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: api_key\nstringData:\n api_key: secret\ntype: Opaque\n
For simplicity, this example uses a simple, static API key for your app."},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#applying-a-ratelimitpolicy-to-an-endpoint","title":"Applying a RateLimitPolicy to an endpoint","text":"To enforce rate limiting on the /store/inventory
endpoint, add the following x-kuadrant
extension:
/store/inventory:\n get:\n # ...\n x-kuadrant:\n backendRefs:\n # ...\n rate_limit:\n rates:\n\n - limit: 10\n duration: 10\n unit: second\n
This limits requests to 10 every 10 seconds for the /store/inventory
endpoint.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-4-generate-kubernetes-resources-by-using-kuadrantctl","title":"Step 4 - Generate Kubernetes resources by using kuadrantctl","text":"With your extensions in place, you can use kuadrantctl
to generate the following Kubernetes resources:
- An
HTTPRoute
for your petstore
app for each of your endpoints. - An
AuthPolicy
with a simple, static API key from a secret for the /user/login
endpoint. - A
RateLimitPolicy
with a rate limit of 10 requests every 10 seconds for the /store/inventory
endpoint.
In Dev Spaces, select \u2630 > Terminal > New Terminal, and run the following commands:
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-httproute","title":"Generate an HTTPRoute","text":"kuadrantctl generate gatewayapi httproute --oas openapi.yaml\n
This command outputs the following HTTPRoute
:
kind: HTTPRoute\napiVersion: gateway.networking.k8s.io/v1beta1\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n parentRefs:\n\n - namespace: gateways\n name: apiGateway\n hostnames:\n - example.com\n rules:\n - matches:\n - path:\n type: Exact\n value: /api/v3/pet/findByStatus\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n - matches:\n - path:\n type: Exact\n value: /api/v3/store/inventory\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\nstatus:\n parents: null\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"kuadrantctl generate kuadrant authpolicy --oas openapi.yaml\n
This command outputs the following AuthPolicy
:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: petstore\n namespace: petstore\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\n rules:\n authentication:\n GETuserlogin_api_key:\n credentials:\n customHeader:\n name: api_key\n apiKey:\n selector:\n matchLabels:\n kuadrant.io/apikeys-by: api_key\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\nstatus: {}\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml\n
This command outputs the following RateLimitPolicy
:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: petstore\n namespace: petstore\n limits:\n GETstoreinventory:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v3/store/inventory\n method: GET\n rates:\n - limit: 10\n duration: 10\n unit: second\nstatus: {}\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-5-applying-resources-to-the-app","title":"Step 5 - Applying resources to the app","text":"Note: By default, the oc
and kubectl
commands in Dev Spaces target the cluster running Dev Spaces. To apply resources to another cluster, you must either log in to that cluster with oc
or kubectl
, or pass a different --context
to these commands.
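For example, a hypothetical invocation against a different kubeconfig context; the context name my-other-cluster is a placeholder, and you can list your contexts with kubectl config get-contexts:
kuadrantctl generate gatewayapi httproute --oas openapi.yaml | kubectl --context my-other-cluster apply -f -\n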
You can now apply these policies to a running app by using kubectl
or oc
. If Dev Spaces is running on a cluster where Kuadrant is also installed, you can apply these resources as follows:
kuadrantctl generate gatewayapi httproute --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant authpolicy --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | kubectl apply -f -\n
Alternatively, you can use kuadrantctl
as part of a CI/CD pipeline. For more details, see the kuadrantctl CI/CD guide.
If you completed the optional Git configuration step, you can enter git commit
to commit these changes and push them to your fork.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#additional-resources","title":"Additional resources","text":"For more details, see the following documentation on using x-kuadrant
OAS extensions with kuadrantctl
:
- OpenAPI 3.0.x Kuadrant extensions
- Generate Gateway API HTTPRoutes with
kuadrantctl
- Generate Kuadrant AuthPolicy with
kuadrantctl
- Generate Kuadrant RateLimitPolicy with
kuadrantctl
- kuadrantctl CI/CD guide
"},{"location":"dns-operator/","title":"DNS Operator","text":"The DNS Operator is a kubernetes based controller responsible for reconciling DNS Record and Managed Zone custom resources. It interfaces with cloud DNS providers such as AWS and Google to bring the DNS zone into the state declared in these CRDs. One of the key use cases the DNS operator solves, is allowing complex DNS routing strategies such as Geo and Weighted to be expressed allowing you to leverage DNS as the first layer of traffic management. In order to make these strategies valuable, it also works across multiple clusters allowing you to use a shared domain name balance traffic based on your requirements.
"},{"location":"dns-operator/#getting-started","title":"Getting Started","text":""},{"location":"dns-operator/#pre-setup","title":"Pre Setup","text":""},{"location":"dns-operator/#add-dns-provider-configuration","title":"Add DNS provider configuration","text":"NOTE: You can optionally skip this step but at least one ManagedZone will need to be configured and have valid credentials linked to use the DNS Operator.
"},{"location":"dns-operator/#aws-provider-route53","title":"AWS Provider (Route53)","text":"make local-setup-aws-mz-clean local-setup-aws-mz-generate AWS_ZONE_ROOT_DOMAIN=<MY AWS Zone Root Domain> AWS_DNS_PUBLIC_ZONE_ID=<My AWS DNS Public Zone ID> AWS_ACCESS_KEY_ID=<My AWS ACCESS KEY> AWS_SECRET_ACCESS_KEY=<My AWS Secret Access Key>\n
More details about the AWS provider can be found here"},{"location":"dns-operator/#gcp-provider","title":"GCP Provider","text":"make local-setup-gcp-mz-clean local-setup-gcp-mz-generate GCP_ZONE_NAME=<My GCP ZONE Name> GCP_ZONE_DNS_NAME=<My Zone DNS Name> GCP_GOOGLE_CREDENTIALS='<My GCP Credentials.json>' GCP_PROJECT_ID=<My GCP PROJECT ID>\n
More details about the GCP provider can be found here"},{"location":"dns-operator/#running-controller-locally-default","title":"Running controller locally (default)","text":" -
Create local environment(creates kind cluster)
make local-setup\n
-
Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
make run\n
"},{"location":"dns-operator/#running-controller-on-the-cluster","title":"Running controller on the cluster","text":" -
Create local environment(creates kind cluster)
make local-setup DEPLOY=true\n
-
Verify controller deployment
kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n
"},{"location":"dns-operator/#running-controller-on-existing-cluster","title":"Running controller on existing cluster","text":"You\u2019ll need a Kubernetes cluster to run against. You can use KIND to get a local cluster for testing, or run against a remote cluster. Note: Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster kubectl cluster-info
shows).
-
Apply Operator manifests
kustomize build config/default | kubectl apply -f -\n
-
Verify controller deployment
kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n
"},{"location":"dns-operator/#development","title":"Development","text":""},{"location":"dns-operator/#e2e-test-suite","title":"E2E Test Suite","text":"The e2e test suite can be executed against any cluster running the DNS Operator with configuration added for any supported provider.
make test-e2e TEST_DNS_MANAGED_ZONE_NAME=<My managed zone name> TEST_DNS_ZONE_DOMAIN_NAME=<My domain name> TEST_DNS_NAMESPACE=<My test namespace> TEST_DNS_PROVIDER=<aws|gcp>\n
Environment Variable Description TEST_DNS_MANAGED_ZONE_NAME Name of the managed zone relevant for the test domain (TEST_DNS_ZONE_DOMAIN_NAME). If using local-setup Managed zones, one of [dev-mz-aws; dev-mz-gcp] TEST_DNS_ZONE_DOMAIN_NAME Domain name being used for the test, must match the domain of the managed zone (TEST_DNS_MANAGED_ZONE_NAME) TEST_DNS_NAMESPACE The namespace to run the test in, must be the same namespace as the TEST_DNS_MANAGED_ZONE_NAME TEST_DNS_PROVIDER DNS Provider currently being tested, one of [aws; gcp]"},{"location":"dns-operator/#modifying-the-api-definitions","title":"Modifying the API definitions","text":"If you are editing the API definitions, generate the manifests such as CRs or CRDs using:
make manifests\n
NOTE: Run make --help
for more information on all potential make
targets
More information can be found via the Kubebuilder Documentation
"},{"location":"dns-operator/#license","title":"License","text":"Copyright 2024.
Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0\n
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"},{"location":"dns-operator/docs/RELEASE/","title":"RELEASE","text":""},{"location":"dns-operator/docs/RELEASE/#release","title":"Release","text":""},{"location":"dns-operator/docs/RELEASE/#new-majorminor-version","title":"New Major.Minor version","text":" - Create a new minor release branch from the HEAD of main:
git checkout -b release-0.2\n
- Run prepare release:
make prepare-release IMG_TAG=release-0.2 VERSION=0.2.0-dev CHANNELS=alpha REPLACES_VERSION=0.1.0\n
- Verify local changes, commit and push:
git add .\ngit commit -m \"prepare-release: release-0.2\"\ngit push upstream release-0.2\n
-
Verify that the build image workflow is triggered and completes for the new branch
-
Do any final testing and bug fixing against the release branch, see Verify OLM Deployment
-
Run prepare release for final version
make prepare-release VERSION=0.2.0 CHANNELS=stable REPLACES_VERSION=0.1.0\n
- Verify local changes, commit, push and tag:
git add .\ngit commit -m \"prepare-release: v0.2.0\"\ngit tag v0.2.0\ngit push upstream release-0.2\ngit push upstream v0.2.0\n
-
Verify that the build image workflow is triggered and completes for the new tag
-
Verify the new version can be installed from the catalog image, see Verify OLM Deployment
-
Release to the community operator index catalogs.
"},{"location":"dns-operator/docs/RELEASE/#new-patch-version","title":"New Patch version","text":" - Checkout minor release branch:
git checkout release-0.2\n
- Run prepare release:
make prepare-release VERSION=0.2.1 CHANNELS=stable REPLACES_VERSION=0.2.0\n
- Verify local changes, commit and push:
git add .\ngit commit -m \"prepare-release: v0.2.1\"\ngit tag v0.2.1\ngit push upstream release-0.2\ngit push upstream v0.2.1\n
-
Verify that the build image workflow is triggered and completes for the new tag
-
Verify the new version can be installed from the catalog image, see Verify OLM Deployment
-
Release to the community operator index catalogs.
"},{"location":"dns-operator/docs/RELEASE/#verify-olm-deployment","title":"Verify OLM Deployment","text":" -
Deploy the OLM catalog image:
make local-setup install-olm deploy-catalog\n
-
Wait for deployment:
kubectl -n dns-operator-system wait --timeout=60s --for=condition=Available deployments --all\ndeployment.apps/dns-operator-controller-manager condition met\n
-
Check the logs:
kubectl -n dns-operator-system logs -f deployment/dns-operator-controller-manager\n
-
Check the version:
$ kubectl -n dns-operator-system get deployment dns-operator-controller-manager --show-labels\nNAME READY UP-TO-DATE AVAILABLE AGE LABELS\ndns-operator-controller-manager 1/1 1 1 5m42s app.kubernetes.io/component=manager,app.kubernetes.io/created-by=dns-operator,\napp.kubernetes.io/instance=controller-manager,app.kubernetes.io/managed-by=kustomize,app.kubernetes.io/name=deployment,app.kubernetes.io/part-of=dns-operator,\ncontrol-plane=dns-operator-controller-manager,olm.deployment-spec-hash=1jPe8AuMpSKHh51nnDs4j25ZgoUrKhF45EP0Wa,olm.managed=true,olm.owner.kind=ClusterServiceVersion,\nolm.owner.namespace=dns-operator-system,olm.owner=dns-operator.v0.2.0-dev,operators.coreos.com/dns-operator.dns-operator-system=\n
"},{"location":"dns-operator/docs/RELEASE/#community-operator-index-catalogs","title":"Community Operator Index Catalogs","text":" - Operatorhub Community Operators
- Openshift Community Operators
"},{"location":"dns-operator/docs/managedzone/","title":"Creating and using a ManagedZone resource.","text":""},{"location":"dns-operator/docs/managedzone/#what-is-a-managedzone","title":"What is a ManagedZone","text":"A ManagedZone is a reference to a DNS zone. By creating a ManagedZone we are instructing the MGC about a domain or subdomain that can be used as a host by any gateways in the same namespace. These gateways can use a subdomain of the ManagedZone.
If a gateway attempts to a use a domain as a host, and there is no matching ManagedZone for that host, then that host on that gateway will fail to function.
A gateway's host will be matched to any ManagedZone that the host is a subdomain of, i.e. test.api.hcpapps.net
will be matched by any ManagedZone (in the same namespace) of: test.api.hcpapps.net
, api.hcpapps.net
or hcpapps.net
.
When MGC wants to create the DNS Records for a host, it will create them in the most exactly matching ManagedZone. e.g. given the zones hcpapps.net
and api.hcpapps.net
the DNS Records for the host test.api.hcpapps.net
will be created in the api.hcpapps.net
zone.
"},{"location":"dns-operator/docs/managedzone/#private-and-public-zones","title":"Private and Public Zones","text":"Some DNS providers offer private zones. While this is something we will want to support in the future, we currently only support public zones.
"},{"location":"dns-operator/docs/managedzone/#delegation","title":"Delegation","text":"Delegation allows you to give control of a subdomain of a root domain to MGC while the root domain has it's DNS zone elsewhere.
In the scenario where a root domain has a zone outside Route53, e.g. external.com
, and a ManagedZone for delegated.external.com
is required, the following steps can be taken:
- Create the ManagedZone for
delegated.external.com
and wait until the status is updated with an array of nameservers (e.g. ns1.hcpapps.net
, ns2.hcpapps.net
). - Copy these nameservers to your root zone for
external.com
, you can create a NS record for each nameserver against the delegated.external.com
record.
For example:
delegated.external.com. 3600 IN NS ns1.hcpapps.net.\ndelegated.external.com. 3600 IN NS ns2.hcpapps.net.\n
Now, when MGC creates a DNS record in it's Route53 zone for delegated.external.com
, it will be resolved correctly.
"},{"location":"dns-operator/docs/managedzone/#creating-a-managedzone","title":"Creating a ManagedZone","text":"To create a ManagedZone
, you will first need to create a DNS provider Secret. To create one, see our DNS Provider setup guide, and make note of your provider's secret name.
"},{"location":"dns-operator/docs/managedzone/#example-managedzone","title":"Example ManagedZone","text":"To create a new ManagedZone
with AWS Route, with a DNS Provider secret named my-aws-credentials
:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: my-test-aws-zone\n namespace: multi-cluster-gateways\nspec:\n domainName: mydomain.example.com\n description: \"My Managed Zone\"\n dnsProviderSecretRef:\n name: my-aws-credentials\nEOF\n
This will create a new Zone in AWS, for mydomain.example.com
, using the DNS Provider credentials in the my-aws-credentials
Secret.
If you'd like to create a ManagedZone
for an existing zone in AWS, note its Zone ID and run:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: my-test-aws-zone\n namespace: multi-cluster-gateways\nspec:\n id: MYZONEID\n domainName: mydomain.example.com\n description: \"My Managed Zone\"\n dnsProviderSecretRef:\n name: my-aws-credentials\nEOF\n
"},{"location":"dns-operator/docs/managedzone/#dnsprovidersecretref","title":"dnsProviderSecretRef","text":"This is a reference to secret containing the credentials and other configuration for accessing your dns provider dnsProvider
Note: the Secret referenced in the dnsProviderSecretRef
field must be in the same namespace as the ManagedZone.
Note: as an id
was specified, the Managed Gateway Controller will not re-create this zone, nor will it delete it if this ManagedZone
is deleted.
"},{"location":"dns-operator/docs/managedzone/#spec-of-a-managedzone","title":"Spec of a ManagedZone","text":"The ManagedZone is a simple resource with an uncomplicated API, see a sample here.
"},{"location":"dns-operator/docs/provider/","title":"Configuring a DNS Provider","text":"In order to be able to interact with supported DNS providers, Kuadrant needs a credential that it can use.
"},{"location":"dns-operator/docs/provider/#supported-providers","title":"Supported Providers","text":"Kuadrant Supports the following DNS providers currently
- AWS Route 53 (AWS)
- Google Cloud DNS (GCP)
"},{"location":"dns-operator/docs/provider/#aws-route-53-provider","title":"AWS Route 53 Provider","text":"Kuadrant expects a Secret
with a credential. Below is an example for AWS Route 53. It is important to set the secret type to aws
:
kubectl create secret generic my-aws-credentials \\\n --namespace=kuadrant-dns-system \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n --from-literal=AWS_REGION=eu-west-1 \\\n --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n
Key Example Value Description AWS_REGION
eu-west-1
AWS Region AWS_ACCESS_KEY_ID
XXXX
AWS Access Key ID (see note on permissions below) AWS_SECRET_ACCESS_KEY
XXXX
AWS Secret Access Key"},{"location":"dns-operator/docs/provider/#aws-iam-permissions-required","title":"AWS IAM Permissions Required","text":"We have tested using the available policy AmazonRoute53FullAccess
however it should also be possible to restrict the credential down to a particular zone. More info can be found in the AWS docs:
https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/access-control-managing-permissions.html
"},{"location":"dns-operator/docs/provider/#google-cloud-dns-provider","title":"Google Cloud DNS Provider","text":"Kuadant expects a secret with a credential. Below is an example for Google DNS. It is important to set the secret type to gcp
:
kubectl create secret generic my-test-gcp-credentials \\\n --namespace=kuadrant-dns-system \\\n --type=kuadrant.io/gcp \\\n --from-literal=PROJECT_ID=xxx \\\n --from-file=GOOGLE=$HOME/.config/gcloud/application_default_credentials.json\n
Env Var Example Value Description GOOGLE
{\"client_id\": \"***\",\"client_secret\": \"***\",\"refresh_token\": \"***\",\"type\": \"authorized_user\"}
This is the JSON created from either the credential created by the gcloud
CLI, or the JSON from the Service account PROJECT_ID
my_project_id
ID to the Google project"},{"location":"dns-operator/docs/provider/#google-cloud-dns-access-permissions-required","title":"Google Cloud DNS Access permissions required","text":"See: https://cloud.google.com/dns/docs/access-control#dns.admin
"},{"location":"dns-operator/docs/provider/#where-to-create-the-secrets","title":"Where to create the Secrets","text":"It is recommended that you create the secret in the same namespace as your ManagedZones
. In the examples above, we've stored these in a namespace called kuadrant-dns-system
.
Now that we have the credential created we have a DNS provider ready to go and can start using it.
"},{"location":"dns-operator/docs/provider/#using-a-credential","title":"Using a Credential","text":"Once a Secret
like the one shown above is created, in order for it to be used, it needs to be associated with a ManagedZone
.
See ManagedZone
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/","title":"List of issues","text":" - Re-queue validation intermittently GH-36
- Re-queue DNS Record whenever a write to the Cloud Provider occurs GH-35
- Schedule removal of finalizer from DNS Records GH-38
- Record write attempts in status for current generation GH-34
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#the-idea","title":"The idea","text":"We now will constantly reconcile DNS records. The reasoning is that other controllers may override/change records in the DNS provider so there is a need to requeue the DNS Record from time to time even when no local changes are introduced.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#details","title":"Details","text":"There are a few new fields on the DNS Record status:
- QueuedAt is a time when the DNS record was received for the reconciliation
- QueuedFor is a time when we expect a DNS record to be reconciled again
- ValidFor indicates the duration since the last reconciliation we consider data in the record to be valid
- WriteCounter represents a number of consecutive write attempts on the same generation of the record. It is being reset to 0 when the generation changes or there are no changes to write.
There is an option to override the ValidFor
and DefaultRequeueTime
with valid-for
and requeue-time
flags respectively.
The DefaultRequeueTime
is the duration between successful validation and the next reconciliation to ensure that the record is still up-to-date.
The ValidFor
is used to determine if we should do a full reconciliation when we get the record. If the record is still valid we will only update finalizers and validate the record itself. It will not perform anything that involves a DNS provider.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#dns-record-normal-lifecycle","title":"DNS Record normal lifecycle","text":"Once we enqueue the DNS record, controller will compile a list of changes to the DNS provider and will apply it. After this, the record is enqueued with the validationRequeueTime
and the Ready
condition will be marked as false
with a message Awaiting Validation
. When the record is received again and the controller ensures there are no changes needed (the ones applied are present in the DNS Provider) it sets the Ready
condition to true
and enqueues it with the defaultRequeueTime
.
At any time when the record is requeued we also set the record.Status.QueuedFor
field with a timestamp for when we expect to receive the record again. And on every reconciliation we set the record.Status.QueuedAt
to be the time of the reconciliation.
Upon deletion, the process will be similar. The controller will determine the changes needed to the DNS provider and will apply them. The record will be requeued with the validationRequeueTime
. Once we receive it back and ensure that there are no changes needed for the DNS provider we remove the finalizer from the record.
The validationRequeueTime
duration is randomized +/- 50%.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#when-things-go-south","title":"When things go south","text":"If the record is received prematurely - the ValidFor
+ QueuedAt
is more than the current time - we requeue it again for the ValidFor
duration.
When we encounter an error during the reconciliation we will not requeue the record and will put in an appropriate error message in the log and on the record. In order for it to reconcile again there must be a change to the DNS Record CR.
It is possible for a user to mess with the timestamps field or the ValidFor
field. Kubernetes will not allow setting an invalid value to the timestamp fields. Once the timestamp fields are set manually it will trigger reconciliation since there is a change in the record CR. The only one that could impact the controller is the QueuedAt
field and the controller will believe that to be the last time the record was reconciled. As for the ValidFor
: since it is a simple string it is possible to set an incorrect value. If we fail to parse it we treat the ValidFor
as 0. This means that the controller will believe that the information in the record is expired and will probe the DNS provider for an update. If a valid value is provided, the controller will obey it. Eventually, the controller will naturally enqueue the record and those values will be overridden.
In case the controller fails to retain changes in the DNS Provider (writes are successful, but the validation fails again) and the WriteCounter
reaches the WriteCounterLimit
we give up on the reconciliation. The appropriate message will be put under the Ready - false
condition as well as in the logs of the controller. The reconciliation will resume once the generation of the DNS Record is changed.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#metrics","title":"Metrics","text":"There is a metric emitted from the controller: dns_provider_write_counter
. It reflects the WriteCounter
field in the status of the record.
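If the controller is scraped by Prometheus, a query along the following lines could surface records that keep retrying writes; the threshold of 3 is arbitrary and the available labels depend on your scrape configuration:
# Records whose consecutive write attempts exceed an arbitrary threshold:\ndns_provider_write_counter > 3\n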
"},{"location":"dns-operator/docs/reference/dnsrecord/","title":"The DNSRecord Custom Resource Definition (CRD)","text":" - DNSRecord
- DNSRecordSpec
- DNSRecordStatus
"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecord","title":"DNSRecord","text":"Field Type Required Description spec
DNSRecordSpec Yes The specification for DNSRecord custom resource status
DNSRecordStatus No The status for the custom resource"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordspec","title":"DNSRecordSpec","text":"Field Type Required Description ownerID
String Yes Unique string used to identify the owner of this record rootHost
String Yes Single root host of all endpoints in a DNSRecord managedZone
ManagedZoneReference Yes Reference to a ManagedZone instance to which this record will publish its endpoints endpoints
[]ExternalDNS Endpoint No Endpoints to manage in the dns provider healthCheck
HealthCheckSpec No Health check configuration"},{"location":"dns-operator/docs/reference/dnsrecord/#managedzonereference","title":"ManagedZoneReference","text":"Field Type Required Description name
String Yes Name of a managed zone"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description endpoint
String Yes Endpoint is the path to append to the host to reach the expected health check port
Number Yes Port to connect to the host on protocol
String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold
Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordstatus","title":"DNSRecordStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec conditions
[]Kubernetes meta/v1.Condition List of conditions that define the status of the resource queuedAt
Kubernetes meta/v1.Time QueuedAt is a time when DNS record was received for the reconciliation queuedFor
Kubernetes meta/v1.Time QueuedFor is a time when we expect a DNS record to be reconciled again validFor
String ValidFor indicates duration since the last reconciliation we consider data in the record to be valid writeCounter
Number WriteCounter represents the number of consecutive write attempts on the same generation of the record endpoints
[]ExternalDNS Endpoint Endpoints are the last endpoints that were successfully published by the provider healthCheck
HealthCheckStatus Health check status"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the health checks probes
[]HealthCheckStatusProbe Health check Probe status"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatusprobe","title":"HealthCheckStatusProbe","text":"Field Type Description id
String The health check id ipAddress
String The ip address being monitored host
String The host being monitored synced
Boolean Synced conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the probe"},{"location":"dns-operator/docs/reference/managedzone/","title":"The ManagedZone Custom Resource Definition (CRD)","text":" - ManagedZone
- ManagedZoneSpec
- ManagedZoneStatus
"},{"location":"dns-operator/docs/reference/managedzone/#managedzone","title":"ManagedZone","text":"Field Type Required Description spec
ManagedZoneSpec Yes The specification for ManagedZone custom resource status
ManagedZoneStatus No The status for the custom resource"},{"location":"dns-operator/docs/reference/managedzone/#managedzonespec","title":"ManagedZoneSpec","text":"Field Type Required Description id
String No ID is the provider assigned id of this zone (i.e. route53.HostedZone.ID) domainName
String Yes Domain name of this ManagedZone description
String No Description for this ManagedZone parentManagedZone
ManagedZoneReference No Reference to another managed zone that this managed zone belongs to dnsProviderSecretRef
SecretRef No Reference to a secret containing provider credentials"},{"location":"dns-operator/docs/reference/managedzone/#managedzonereference","title":"ManagedZoneReference","text":"Field Type Required Description name
String Yes Name of a managed zone"},{"location":"dns-operator/docs/reference/managedzone/#secretref","title":"SecretRef","text":"Field Type Required Description name
String Yes Name of the secret namespace
String Yes Namespace of the secret"},{"location":"dns-operator/docs/reference/managedzone/#managedzonestatus","title":"ManagedZoneStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the resource id
String The ID assigned by this provider for this zone (i.e. route53.HostedZone.ID) recordCount
Number The number of records in the provider zone nameServers
[]String The NameServers assigned by the provider for this zone (i.e. route53.DelegationSet.NameServers)"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"Kuadrant combines Gateway API and Istio-based gateway controllers to enhance application connectivity. It enables platform engineers and application developers to easily connect, secure, and protect their services and infrastructure across multiple clusters with policies for TLS, DNS, application authentication & authorization, and rate limiting. Additionally, Kuadrant offers observability templates to further support infrastructure management.
"},{"location":"#getting-started","title":"Getting Started","text":"For a quick local setup of Kuadrant, see our Single Cluster or Multi Cluster guides. Explore the single and multi-cluster architecture in our Architectural Overview.
"},{"location":"getting-started-multi-cluster-ocm/","title":"Getting Started with OCM","text":""},{"location":"getting-started-multi-cluster-ocm/#kuadrant-getting-started-multi-cluster","title":"Kuadrant Getting Started - Multi Cluster","text":""},{"location":"getting-started-multi-cluster-ocm/#prerequisites","title":"Prerequisites","text":" - Docker
- Kind
- Kubectl
- OpenSSL >= 3
- AWS account with Route 53 enabled or GCP with Cloud DNS enabled
- Docker Mac Net Connect (macOS users only)
"},{"location":"getting-started-multi-cluster-ocm/#dns-environmental-variables","title":"DNS Environmental Variables","text":"Export environment variables with the keys listed below for your desired provider. Fill in your own values as appropriate. Note that you will need to have created a root domain in AWS Route 53 or in GCP Cloud DNS:
"},{"location":"getting-started-multi-cluster-ocm/#aws","title":"AWS","text":"Env Var Example Value Description MGC_ZONE_ROOT_DOMAIN
jbloggs.hcpapps.net
Hostname for the root Domain MGC_AWS_DNS_PUBLIC_ZONE_ID
Z01234567US0IQE3YLO00
AWS Route 53 Zone ID for specified MGC_ZONE_ROOT_DOMAIN
MGC_AWS_ACCESS_KEY_ID
AKIA1234567890000000
Access Key ID, for a user with permissions to Route 53 in the account where the root domain is created MGC_AWS_SECRET_ACCESS_KEY
Z01234567US0000000
Secret Access Key, for a user with permissions to Route 53 in the account where the root domain is created MGC_AWS_REGION
eu-west-1
AWS Region"},{"location":"getting-started-multi-cluster-ocm/#gcp","title":"GCP","text":"Env Var Example Value Description GOOGLE
{\"client_id\": \"00000000-00000000000000.apps.googleusercontent.com\",\"client_secret\": \"d-FL95Q00000000000000\",\"refresh_token\": \"00000aaaaa00000000-AAAAAAAAAAAAKFGJFJDFKDK\",\"type\": \"authorized_user\"}
The JSON credentials, created either by the Google Cloud CLI or for a service account PROJECT_ID
my_project_id
ID of the Google Cloud project ZONE_NAME
jbloggs-google
Zone name ZONE_DNS_NAME
jbloggs.google.hcpapps.net
DNS name LOG_LEVEL
1
Log level for the Controller Alternatively, to set defaults, add the above environment variables to your .zshrc
or .bash_profile
.
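For example, using the sample AWS values from the table above (substitute your own):
export MGC_ZONE_ROOT_DOMAIN=jbloggs.hcpapps.net\nexport MGC_AWS_DNS_PUBLIC_ZONE_ID=Z01234567US0IQE3YLO00\nexport MGC_AWS_ACCESS_KEY_ID=AKIA1234567890000000\nexport MGC_AWS_SECRET_ACCESS_KEY=Z01234567US0000000\nexport MGC_AWS_REGION=eu-west-1\n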
"},{"location":"getting-started-multi-cluster-ocm/#set-the-release-you-want-to-use","title":"Set the release you want to use","text":"export MGC_BRANCH=release-0.3\n
"},{"location":"getting-started-multi-cluster-ocm/#set-up-clusters-and-install-kuadrant","title":"Set Up Clusters and install Kuadrant","text":"Run the following:
curl \"https://raw.githubusercontent.com/kuadrant/multicluster-gateway-controller/${MGC_BRANCH}/hack/quickstart-setup.sh\" | bash\n
"},{"location":"getting-started-multi-cluster-ocm/#whats-next","title":"What's Next","text":"Now that you have two Kind clusters configured with Kuadrant installed you are ready to begin the Multicluster Gateways walkthrough.
"},{"location":"getting-started-multi-cluster/","title":"Multi-Cluster","text":""},{"location":"getting-started-multi-cluster/#kuadrant-getting-started-multi-cluster","title":"Kuadrant Getting Started - Multi Cluster","text":""},{"location":"getting-started-multi-cluster/#overview","title":"Overview","text":"In this quick start, we will cover the setup of Kuadrant in multiple local kind clusters. This document is intended as a follow on to the single cluster guide. It can be used for adding 1 or more clusters to your local setup.
"},{"location":"getting-started-multi-cluster/#prerequisites","title":"Prerequisites","text":" - Completed the Single-cluster Quick Start
"},{"location":"getting-started-multi-cluster/#environmental-variables","title":"Environmental Variables","text":"The same environment variable requirements from the Single-cluster Quick Start apply to this document, including the KUADRANT_REF
variable.
"},{"location":"getting-started-multi-cluster/#set-up-a-kind-cluster-and-install-kuadrant","title":"Set Up a kind cluster and install Kuadrant","text":"Run the same quickstart script from the single cluster quick start:
curl \"https://raw.githubusercontent.com/kuadrant/kuadrant-operator/${KUADRANT_REF}/hack/quickstart-setup.sh\" | bash\n
The script will detect if you already have a cluster from the single cluster setup running, and prompt you for a multi cluster setup. This will set up an additional kind cluster, install Istio, and install Kuadrant. You can re-run the script multiple times to add more clusters. Each cluster will have a number suffix in the name. For example: kuadrant-local-1
, kuadrant-local-2
, kuadrant-local-3
. The original cluster from the single cluster setup will keep its name of just kuadrant-local
.
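To switch between the clusters, use the kubectl contexts created by Kind (a sketch; Kind prefixes each context with kind-):
kind get clusters\nkubectl config use-context kind-kuadrant-local-1\n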
"},{"location":"getting-started-multi-cluster/#clean-up","title":"Clean Up","text":"To ensure that any DNS records are removed, you should remove any DNSPolicy
and TLSPolicy
resources before deleting the local cluster.
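For example (a sketch, assuming the DNSPolicy and TLSPolicy resource names resolve as below):
kubectl delete dnspolicies --all --all-namespaces\nkubectl delete tlspolicies --all --all-namespaces\n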
"},{"location":"getting-started-multi-cluster/#whats-next","title":"What's Next","text":"The next step is to setup and use the policies provided by Kuadrant.
Secure, Protect and Connect your Gateway
"},{"location":"getting-started-single-cluster/","title":"Single-Cluster","text":""},{"location":"getting-started-single-cluster/#kuadrant-getting-started-single-cluster","title":"Kuadrant Getting Started - Single Cluster","text":""},{"location":"getting-started-single-cluster/#overview","title":"Overview","text":"In this quick start, we will cover:
- setup of Kuadrant in a single local kind cluster
"},{"location":"getting-started-single-cluster/#prerequisites","title":"Prerequisites","text":" - Docker
- Kind
- Kubectl
- OpenSSL >= 3
- AWS account with Route 53 enabled or GCP with Cloud DNS enabled
- Docker Mac Net Connect (macOS users only)
"},{"location":"getting-started-single-cluster/#environmental-variables","title":"Environmental Variables","text":""},{"location":"getting-started-single-cluster/#general","title":"General","text":"Env Var Example Value Description ISTIO_INSTALL_SAIL
true
Whether to install Istio through Project Sail; defaults to false
If you want to make use of the Kuadrant DNSPolicy
you should set up the following environment variables, depending on your DNS provider:
"},{"location":"getting-started-single-cluster/#aws","title":"AWS","text":"Env Var Example Value Description KUADRANT_ZONE_ROOT_DOMAIN
jbloggs.hcpapps.net
Hostname for the root Domain KUADRANT_AWS_DNS_PUBLIC_ZONE_ID
Z01234567US0IQE3YLO00
AWS Route 53 Zone ID for specified KUADRANT_ZONE_ROOT_DOMAIN
KUADRANT_AWS_ACCESS_KEY_ID
AKIA1234567890000000
Access Key ID, for a user with permissions to Route 53 in the account where the root domain is created KUADRANT_AWS_SECRET_ACCESS_KEY
Z01234567US0000000
Secret Access Key, for a user with permissions to Route 53 in the account where the root domain is created KUADRANT_AWS_REGION
eu-west-1
AWS Region"},{"location":"getting-started-single-cluster/#gcp","title":"GCP","text":"Env Var Example Value Description GOOGLE
{\"client_id\": \"00000000-00000000000000.apps.googleusercontent.com\",\"client_secret\": \"d-FL95Q00000000000000\",\"refresh_token\": \"00000aaaaa00000000-AAAAAAAAAAAAKFGJFJDFKDK\",\"type\": \"authorized_user\"}
The JSON credentials, created either by the Google Cloud CLI or for a service account PROJECT_ID
my_project_id
ID of the Google Cloud project ZONE_NAME
jbloggs-google
Zone name ZONE_DNS_NAME
jbloggs.google.hcpapps.net
DNS name LOG_LEVEL
1
Log level for the Controller Alternatively, to set defaults, add the above environment variables to your .zshrc
or .bash_profile
.
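For example, using the sample GCP values from the table above (the GOOGLE value is a placeholder for your own credentials JSON):
export GOOGLE='<your credentials JSON>'\nexport PROJECT_ID=my_project_id\nexport ZONE_NAME=jbloggs-google\nexport ZONE_DNS_NAME=jbloggs.google.hcpapps.net\nexport LOG_LEVEL=1\n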
"},{"location":"getting-started-single-cluster/#set-the-release-you-want-to-use","title":"Set the release you want to use","text":"export KUADRANT_REF=v0.8.0\nexport ISTIO_INSTALL_SAIL=true\n
"},{"location":"getting-started-single-cluster/#set-up-a-kind-cluster-and-install-kuadrant","title":"Set Up a kind cluster and install Kuadrant","text":"Run the following:
curl \"https://raw.githubusercontent.com/kuadrant/kuadrant-operator/${KUADRANT_REF}/hack/quickstart-setup.sh\" | bash\n
This will set up a single kind cluster, install Istio, and install Kuadrant. Once this completes, you should be able to move on to using the various policy APIs offered by Kuadrant."},{"location":"getting-started-single-cluster/#clean-up","title":"Clean Up","text":"To ensure that any DNS records are removed, you should remove any DNSPolicy
and TLSPolicy
resources before deleting the local cluster.
"},{"location":"getting-started-single-cluster/#whats-next","title":"What's Next","text":"The next step is to setup and use the policies provided by Kuadrant.
Secure, Protect and Connect your Gateway
"},{"location":"kuadrant-operator/","title":"Kuadrant Operator","text":"The Operator to install and manage the lifecycle of the Kuadrant components deployments.
"},{"location":"kuadrant-operator/#overview","title":"Overview","text":"Kuadrant is a re-architecture of API Management using Cloud Native concepts and separating the components to be less coupled, more reusable and leverage the underlying kubernetes platform. It aims to deliver a smooth experience to providers and consumers of applications & services when it comes to rate limiting, authentication, authorization, discoverability, change management, usage contracts, insights, etc.
Kuadrant aims to produce a set of loosely coupled functionalities built directly on top of Kubernetes. Furthermore, it only strives to provide what Kubernetes doesn\u2019t offer out of the box; i.e. Kuadrant won\u2019t be designing a new Gateway/proxy; instead, it will opt to connect with what\u2019s there and what\u2019s being developed (think Envoy, Istio, GatewayAPI).
Kuadrant is a system of cloud-native k8s components that grows as users\u2019 needs grow.
- From simple protection of a Service (via AuthN) that is used by teammates working on the same cluster, or \u201csibling\u201d services, up to AuthZ of users using OIDC plus custom policies.
- From no rate-limiting to rate-limiting for global service protection on to rate-limiting by users/plans
"},{"location":"kuadrant-operator/#architecture","title":"Architecture","text":"Kuadrant relies on Istio and the Gateway API to operate the cluster (Istio's) ingress gateway to provide API management with authentication (authN), authorization (authZ) and rate limiting capabilities.
"},{"location":"kuadrant-operator/#kuadrant-components","title":"Kuadrant components","text":"CRD Description Control Plane The control plane takes the customer desired configuration (declaratively as kubernetes custom resources) as input and ensures all components are configured to obey customer's desired behavior. This repository contains the source code of the kuadrant control plane Kuadrant Operator A Kubernetes Operator to manage the lifecycle of the kuadrant deployment Authorino The AuthN/AuthZ enforcer. As the external istio authorizer (envoy external authorization serving gRPC service) Limitador The external rate limiting service. It exposes a gRPC service implementing the Envoy Rate Limit protocol (v3) Authorino Operator A Kubernetes Operator to manage Authorino instances Limitador Operator A Kubernetes Operator to manage Limitador instances DNS Operator A Kubernetes Operator to manage DNS records in external providers"},{"location":"kuadrant-operator/#provided-apis","title":"Provided APIs","text":"The kuadrant control plane owns the following Custom Resource Definitions, CRDs:
CRD Description Example AuthPolicy CRD [doc] [reference] Enable AuthN and AuthZ based access control on workloads AuthPolicy CR RateLimitPolicy CRD [doc] [reference] Enable access control on workloads based on HTTP rate limiting RateLimitPolicy CR DNSPolicy CRD [doc] [reference] Enable DNS management DNSPolicy CR TLSPolicy CRD [doc] [reference] Enable TLS management TLSPolicy CR Additionally, Kuadrant provides the following CRDs
CRD Owner Description Example Kuadrant CRD Kuadrant Operator Represents an instance of kuadrant Kuadrant CR Limitador CRD Limitador Operator Represents an instance of Limitador Limitador CR Authorino CRD Authorino Operator Represents an instance of Authorino Authorino CR "},{"location":"kuadrant-operator/#getting-started","title":"Getting started","text":""},{"location":"kuadrant-operator/#pre-requisites","title":"Pre-requisites","text":" - Istio is installed in the cluster. Otherwise, refer to the Istio getting started guide.
- Kubernetes Gateway API is installed in the cluster. Otherwise, configure Istio to expose a service using the Kubernetes Gateway API.
- cert-manager is installed in the cluster. Otherwise, refer to the cert-manager installation guide.
"},{"location":"kuadrant-operator/#installing-kuadrant","title":"Installing Kuadrant","text":"Installing Kuadrant is a two-step procedure. Firstly, install the Kuadrant Operator and secondly, request a Kuadrant instance by creating a Kuadrant custom resource.
"},{"location":"kuadrant-operator/#1-install-the-kuadrant-operator","title":"1. Install the Kuadrant Operator","text":"The Kuadrant Operator is available in public community operator catalogs, such as the Kubernetes OperatorHub.io and the Openshift Container Platform and OKD OperatorHub.
Kubernetes
The operator is available from OperatorHub.io. Just go to the linked page and follow installation steps (or just run these two commands):
# Install Operator Lifecycle Manager (OLM), a tool to help manage the operators running on your cluster.\n\ncurl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.23.1/install.sh | bash -s v0.23.1\n\n# Install the operator by running the following command:\n\nkubectl create -f https://operatorhub.io/install/kuadrant-operator.yaml\n
OpenShift
The operator is available from the OpenShift Console OperatorHub. Just follow the installation steps, choosing the \"Kuadrant Operator\" from the catalog:
"},{"location":"kuadrant-operator/#2-request-a-kuadrant-instance","title":"2. Request a Kuadrant instance","text":"Create the namespace:
kubectl create namespace kuadrant\n
Apply the Kuadrant
custom resource:
kubectl -n kuadrant apply -f - <<EOF\n---\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant-sample\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/#protect-your-service","title":"Protect your service","text":""},{"location":"kuadrant-operator/#if-you-are-an-api-provider","title":"If you are an API Provider","text":" - Deploy the service/API to be protected (\"Upstream\")
- Expose the service/API using the Kubernetes Gateway API, i.e. an HTTPRoute object (see the sketch below).
- Write and apply Kuadrant's RateLimitPolicy and/or AuthPolicy custom resources targeting the HTTPRoute resource to have your API protected.
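A minimal sketch of step 2 (the names my-api, mygateway and the hostname api.example.com are placeholders, not part of this guide):
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: my-api\nspec:\n  parentRefs:\n  - name: mygateway\n  hostnames:\n  - api.example.com\n  rules:\n  - backendRefs:\n    - name: my-api\n      port: 8080\nEOF\n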
"},{"location":"kuadrant-operator/#if-you-are-a-cluster-operator","title":"If you are a Cluster Operator","text":" - (Optionally) deploy istio ingress gateway using the Gateway resource.
- Write and apply the Kuadrant's RateLimitPolicy and/or AuthPolicy custom resources targeting the Gateway resource to have your gateway traffic protected.
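A minimal sketch of such a Gateway (gatewayClassName istio assumes the Istio GatewayClass is installed; all names and hostnames are placeholders):
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: mygateway\nspec:\n  gatewayClassName: istio\n  listeners:\n  - name: http\n    hostname: \"*.example.com\"\n    port: 80\n    protocol: HTTP\n    allowedRoutes:\n      namespaces:\n        from: All\nEOF\n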
"},{"location":"kuadrant-operator/#user-guides","title":"User guides","text":"The user guides section of the docs gathers several use-cases as well as the instructions to implement them using kuadrant.
- Simple Rate Limiting for Application Developers
- Authenticated Rate Limiting for Application Developers
- Gateway Rate Limiting for Cluster Operators
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/#kuadrant-rate-limiting","title":"Kuadrant Rate Limiting","text":""},{"location":"kuadrant-operator/#documentation","title":"Documentation","text":"Docs can be found on the Kuadrant website.
"},{"location":"kuadrant-operator/#contributing","title":"Contributing","text":"The Development guide describes how to build the kuadrant operator and how to test your changes before submitting a patch or opening a PR.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"kuadrant-operator/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"kuadrant-operator/doc/auth/","title":"Kuadrant Auth","text":"A Kuadrant AuthPolicy custom resource:
- Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workloads (HTTP attributes, hostnames, user attributes, etc.) to enforce auth on.
- Supports targeting subsets (sections) of a network resource to apply the auth rules to.
- Abstracts the details of the underlying external authorization protocol and configuration resources, which have a much broader remit and surface area.
- Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/auth/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/auth/#envoys-external-authorization-protocol","title":"Envoy's External Authorization Protocol","text":"Kuadrant's Auth implementation relies on the Envoy's External Authorization protocol. The workflow per request goes:
- On incoming request, the gateway checks the matching rules for enforcing the auth rules, as stated in the AuthPolicy custom resources and targeted Gateway API networking objects
- If the request matches, the gateway sends one CheckRequest to the external auth service (\"Authorino\").
- The external auth service responds with a CheckResponse back to the gateway with either an
OK
or DENIED
response code.
An AuthPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external auth service.
"},{"location":"kuadrant-operator/doc/auth/#the-authpolicy-custom-resource","title":"The AuthPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/auth/#overview","title":"Overview","text":"The AuthPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - Authentication/authorization scheme (
spec.rules
) - Top-level route selectors (
spec.routeSelectors
) - Top-level additional conditions (
spec.when
) - List of named patterns (
spec.patterns
)
The auth scheme specifies rules for:
- Authentication (
spec.rules.authentication
) - External auth metadata fetching (
spec.rules.metadata
) - Authorization (
spec.rules.authorization
) - Custom response items (
spec.rules.response
) - Callbacks (
spec.rules.callbacks
)
Each auth rule can declare specific routeSelectors
and when
conditions for the rule to apply.
The auth scheme (rules
), as well as conditions and named patterns, can be declared at the top level of the spec (with the semantics of defaults) or alternatively within explicit defaults
or overrides
blocks.
"},{"location":"kuadrant-operator/doc/auth/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-auth-policy\nspec:\n # Reference to an existing networking resource to attach the policy to. REQUIRED.\n # It can be a Gateway API HTTPRoute or Gateway resource.\n # It can only refer to objects in the same namespace as the AuthPolicy.\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n\n # Selectors of HTTPRouteRules within the targeted HTTPRoute that activate the AuthPolicy.\n # Each element contains a HTTPRouteMatch object that will be used to select HTTPRouteRules that include at least\n # one identical HTTPRouteMatch.\n # The HTTPRouteMatch part does not have to be fully identical, but the what's stated in the selector must be\n # identically stated in the HTTPRouteRule.\n # Do not use it on AuthPolicies that target a Gateway.\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n\n # Additional dynamic conditions to trigger the AuthPolicy.\n # Use it for filtering attributes not supported by HTTPRouteRule or with AuthPolicies that target a Gateway.\n # Check out https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md to learn more\n # about the Well-known Attributes that can be used in this field.\n # Equivalent to if otherwise declared within `defaults`.\n when: [\u2026]\n\n # Sets of common patterns of selector-operator-value triples, to be referred by name in `when` conditions\n # and pattern-matching rules. Often employed to avoid repetition in the policy.\n # Equivalent to if otherwise declared within `defaults`.\n patterns: {\u2026}\n\n # The auth rules to apply to the network traffic routed through the targeted resource.\n # Equivalent to if otherwise declared within `defaults`.\n rules:\n # Authentication rules to enforce.\n # At least one config must evaluate to a valid identity object for the auth request to be successful.\n # If omitted or empty, anonymous access is assumed.\n authentication:\n \"my-authn-rule\":\n # The authentication method of this rule.\n # One-of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous.\n apiKey: {\u2026}\n\n # Where credentials are required to be passed in the request for authentication based on this rule.\n # One-of: authorizationHeader, customHeader, queryString, cookie.\n credentials:\n authorizationHeader:\n prefix: APIKEY\n\n # Rule-level route selectors.\n routeSelectors: [\u2026]\n\n # Rule-level additional conditions.\n when: [\u2026]\n\n # Configs for caching the resolved object returned out of evaluating this auth rule.\n cache: {\u2026}\n\n # Rules for fetching auth metadata from external sources.\n metadata:\n \"my-external-source\":\n # The method for fetching metadata from the external source.\n # One-of: http: userInfo, uma.\n http: {\u2026}\n\n # Authorization rules to enforce.\n # All policies must allow access for the auth request be successful.\n authorization:\n \"my-authz-rule\":\n # The authorization method of this rule.\n # One-of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb.\n opa: {\u2026}\n\n # Customizations to the authorization response.\n response:\n # Custom denial status and other HTTP attributes for unauthenticated requests.\n unauthenticated: {\u2026}\n\n # Custom denial status and other HTTP attributes for unauhtorized requests.\n 
unauthorized: {\u2026}\n\n # Custom response items when access is granted.\n success:\n # Custom response items wrapped as HTTP headers to be injected in the request\n headers:\n \"my-custom-header\":\n # One-of: plain, json, wristband.\n plain: {\u2026}\n\n # Custom response items wrapped as envoy dynamic metadata.\n dynamicMetadata:\n # One-of: plain, json, wristband.\n \"my-custom-dyn-metadata\":\n json: {\u2026}\n\n # Rules for post-authorization callback requests to external services.\n # Triggered regardless of the result of the authorization request.\n callbacks:\n \"my-webhook\":\n http: {\u2026}\n\n # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n # routes that lack a more specific policy attached to.\n # Mutually exclusive with `overrides` and with declaring the `rules`, `when` and `patterns` at the top-level of\n # the spec.\n defaults:\n rules:\n authentication: {\u2026}\n metadata: {\u2026}\n authorization: {\u2026}\n response: {\u2026}\n callbacks: {\u2026}\n when: [\u2026]\n patterns: {\u2026}\n\n # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n # thus also overriding any more specific policy occasionally attached to any of those routes.\n # Mutually exclusive with `defaults` and with declaring `rules`, `when` and `patterns` at the top-level of\n # the spec.\n overrides:\n rules:\n authentication: {\u2026}\n metadata: {\u2026}\n authorization: {\u2026}\n response: {\u2026}\n callbacks: {\u2026}\n when: [\u2026]\n patterns: {\u2026}\n
Check out the API reference for a full specification of the AuthPolicy CRD.
"},{"location":"kuadrant-operator/doc/auth/#using-the-authpolicy","title":"Using the AuthPolicy","text":""},{"location":"kuadrant-operator/doc/auth/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"When an AuthPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs
field of the HTTPRoute.
The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets, by specifying the routeSelectors
field of the AuthPolicy spec.
Target a HTTPRoute by setting the spec.targetRef
field of the AuthPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-route-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: <HTTPRoute Name>\n rules: {\u2026}\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502 \u2502 (App namespace) \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 parentRefs \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u25b2 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 targetRef \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 \u2502 \u2502 AuthPolicy \u2502 \u2502\n\u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/auth/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"If an AuthPolicy targets a route defined for *.com
and another AuthPolicy targets another route for api.com
, the Kuadrant control plane will not merge these two AuthPolicies. Rather, it will mimic the behavior of the gateway implementation, by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and auth rules.
E.g., a request coming for api.com
will be protected according to the rules from the AuthPolicy that targets the route for api.com
; while a request for other.com
will be protected with the rules from the AuthPolicy targeting the route for *.com
.
Example with 3 AuthPolicies and 3 HTTPRoutes:
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy A will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy B will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy W will be enforced
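To observe this behavior (a sketch; GATEWAY_IP is a placeholder for the gateway's external address):
curl -H 'Host: a.toystore.com' http://$GATEWAY_IP -i # AuthPolicy A\ncurl -H 'Host: b.toystore.com' http://$GATEWAY_IP -i # AuthPolicy B\ncurl -H 'Host: other.toystore.com' http://$GATEWAY_IP -i # AuthPolicy W\n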
"},{"location":"kuadrant-operator/doc/auth/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"An AuthPolicy that targets a Gateway can declare a block of defaults (spec.defaults
) or a block of overrides (spec.overrides
). By default, gateway policies that specify neither defaults nor overrides act as defaults.
When declaring defaults, an AuthPolicy which targets a Gateway will be enforced on all HTTP traffic hitting the gateway, unless a more specific AuthPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default AuthPolicy, as well as changes in the existing HTTPRoutes.
Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempts, such as by setting preemptive \"deny-all\" policies on hostnames and hostname wildcards.
Conversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.
Target a Gateway by setting the spec.targetRef
field of the AuthPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: my-gw-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n defaults: # alternatively: `overrides`\n rules: {\u2026}\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502 \u2502 (App namespace) \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 parentRefs \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u25b2 \u2502 \u2502 \u25b2 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u2502 targetRef \u2502 \u2502 \u2502 targetRef \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n\u2502 \u2502 AuthPolicy \u2502 \u2502 \u2502 \u2502 AuthPolicy \u2502 \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/auth/#overlapping-gateway-and-httproute-authpolicies","title":"Overlapping Gateway and HTTPRoute AuthPolicies","text":"Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.
Gateway AuthPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute AuthPolicy exists, in which case the HTTPRoute AuthPolicy prevails.
Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy G (defaults) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy A will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy B will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy W will be enforced - Request to
other.com
(suppose a route exists) \u2192 AuthPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
Gateway AuthPolicies that declare overrides protect all traffic routed through the gateway, regardless of the existence of any more specific HTTPRoute AuthPolicy.
Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):
- AuthPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - AuthPolicy G (overrides) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
b.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
other.toystore.com
\u2192 AuthPolicy G will be enforced - Request to
other.com
(suppose a route exists) \u2192 AuthPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
"},{"location":"kuadrant-operator/doc/auth/#route-selectors","title":"Route selectors","text":"Route selectors allow targeting sections of a HTTPRoute, by specifying sets of HTTPRouteMatches and/or hostnames that make the policy controller look up within the HTTPRoute spec for compatible declarations, and select the corresponding HTTPRouteRules and hostnames, to then build conditions that activate the policy or policy rule.
Check out Route selectors for a full description, semantics and API reference.
"},{"location":"kuadrant-operator/doc/auth/#when-conditions","title":"when
conditions","text":"when
conditions can be used to scope an AuthPolicy or auth rule within an AuthPolicy (i.e. to filter the traffic to which a policy or policy rule applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules via routeSelectors
.
Use when
conditions to conditionally activate policies and policy rules based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames
and spec.rules.matches
fields, or in general in AuthPolicies that target a Gateway.
when
conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.
The selectors within the when
conditions of an AuthPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.
Authorino JSON path string modifiers can also be applied to the selectors within the when
conditions of an AuthPolicy.
"},{"location":"kuadrant-operator/doc/auth/#examples","title":"Examples","text":"Check out the following user guides for examples of protecting services with Kuadrant:
- Enforcing authentication & authorization with Kuadrant AuthPolicy, for app developers and platform engineers
- Authenticated Rate Limiting for Application Developers
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/auth/#known-limitations","title":"Known limitations","text":" - One HTTPRoute can only be targeted by one AuthPolicy.
- One Gateway can only be targeted by one AuthPolicy.
- AuthPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the AuthPolicy.
- 2+ AuthPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/auth/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"Kuadrant currently does not support multiple AuthPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames
field, as well as HTTPRoutes that specify a hostname that is identical to a hostname specified in a listener of one of the route's parent gateways or HTTPRoutes that don't specify any hostname at all thus inheriting the hostnames from the parent gateways. In any of these cases, a maximum of one AuthPolicy targeting any of those resources that specify identical hostnames is allowed.
Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.
This limitation is rooted at the underlying components configured by Kuadrant for the implementation of its policies and the lack of information in the data plane regarding the exact route that is honored by the API gateway at each specific request, in cases of conflicting hostnames.
To exemplify one way this limitation can impact deployments, consider the following topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2 \u25b2\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 AuthPolicy \u2502 \u2502 AuthPolicy \u2502\n \u2502 (policy-1) \u2502 \u2502 (policy-2) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
In the example above, with the policy-1
resource created before policy-2
, policy-1
will be enforced on all requests to app.io/foo
while policy-2
will be rejected. I.e. app.io/bar
will not be secured. In fact, the status conditions of policy-2
shall reflect Enforced=false
with message \"AuthPolicy has encountered some issues: AuthScheme is not ready yet\".
Notice the enforcement of policy-1
and no enforcement of policy-2
is the opposite behavior as the analogous problem with the Kuadrant RateLimitPolicy.
A slightly different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with expectation to set default rules for the cases not covered by more specific policies. E.g.:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 AuthPolicy \u2502\n \u2502 \u2502 (policy-2) \u2502\n \u25bc \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 AuthPolicy \u2502\n \u2502 (policy-1) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Once again, requests to app.io/foo
will be protected under AuthPolicy policy-1
, while requests to app.io/bar
will not be protected under any policy at all, unlike expected gateway policy policy-2
enforced as default. Both policies will report status condition as Enforced
nonetheless.
To avoid these problems, use different hostnames in each route.
"},{"location":"kuadrant-operator/doc/auth/#implementation-details","title":"Implementation details","text":"Under the hood, for each AuthPolicy, Kuadrant creates an Istio AuthorizationPolicy
and an Authorino AuthConfig
custom resources.
Only requests that matches the rules in the Istio AuthorizationPolicy
cause an authorization request to be sent to the external authorization service (\"Authorino\"), i.e., only requests directed to the HTTPRouteRules targeted by the AuthPolicy (directly or indirectly), according to the declared top-level route selectors (if present), or all requests for which a matching HTTPRouteRule exists (otherwise).
Authorino looks up for the auth scheme (AuthConfig
custom resource) to enforce using the provided hostname of the original request as key. It then checks again if the request matches at least one of the selected HTTPRouteRules, in which case it enforces the auth scheme.
Exception to the rule Due to limitations imposed by the Istio `AuthorizationPolicy`, there are a few patterns of HTTPRouteRules that cannot be translated to filters for the external authorization request. Therefore, the following patterns used in HTTPRouteMatches of top-level route selectors of an AuthPolicy will not be included in the Istio AuthorizationPolicy rules that trigger the check request with Authorino: `PathMatchRegularExpression`, `HeaderMatchRegularExpression`, and `HTTPQueryParamMatch`. As a consequence to the above, requests that do not match these rules and otherwise would not be checked with Authorino will result in a request to the external authorization service. Authorino nonetheless will still verify those patterns and ensure the auth scheme is enforced only when it matches a selected HTTPRouteRule. Users of Kuadrant may observe an unnecessary call to the authorization service in those cases where the request is out of the scope of the AuthPolicy and therefore always authorized."},{"location":"kuadrant-operator/doc/auth/#internal-custom-resources-and-namespaces","title":"Internal custom resources and namespaces","text":"While the Istio AuthorizationPolicy
needs to be created in the same namespace as the gateway workload, the Authorino AuthConfig
is created in the namespace of the AuthPolicy
itself. This allows to simplify references such as to Kubernetes Secrets referred in the AuthPolicy, as well as the RBAC to support the architecture.
"},{"location":"kuadrant-operator/doc/development/","title":"Development Guide","text":""},{"location":"kuadrant-operator/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - operator-sdk version v1.28.1
- kind version v0.22.0
- git
- go version 1.21+
- kubernetes version v1.19+
- kubectl version v1.19+
"},{"location":"kuadrant-operator/doc/development/#build","title":"Build","text":"make\n
"},{"location":"kuadrant-operator/doc/development/#run-locally","title":"Run locally","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Then, run the operator locally
make run\n
"},{"location":"kuadrant-operator/doc/development/#deploy-the-operator-in-a-deployment-object","title":"Deploy the operator in a deployment object","text":"make local-setup\n
List of tasks done by the command above:
- Create local cluster using kind
- Build kuadrant docker image from the current working directory
- Deploy Kuadrant control plane (including istio, authorino and limitador)
TODO: customize with custom authorino and limitador git refs. Make sure Makefile propagates variable to deploy
target
"},{"location":"kuadrant-operator/doc/development/#deploy-kuadrant-operator-using-olm","title":"Deploy kuadrant operator using OLM","text":"You can deploy kuadrant using OLM just running few commands. No need to build any image. Kuadrant engineering team provides latest
and release version tagged images. They are available in the Quay.io/Kuadrant image repository.
Create kind cluster
make kind-create-cluster\n
Deploy OLM system
make install-olm\n
Deploy kuadrant using OLM. The make deploy-catalog
target accepts the following variables:
Makefile Variable Description Default value CATALOG_IMG
Kuadrant operator catalog image URL quay.io/kuadrant/kuadrant-operator-catalog:latest
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
"},{"location":"kuadrant-operator/doc/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"If you want to deploy (using OLM) a custom kuadrant operator, you need to build your own catalog. Furthermore, if you want to deploy a custom limitador or authorino operator, you also need to build your own catalog. The kuadrant operator bundle includes the authorino or limtador operator dependency version, hence using other than latest
version requires a custom kuadrant operator bundle and a custom catalog including the custom bundle.
"},{"location":"kuadrant-operator/doc/development/#build-kuadrant-operator-bundle-image","title":"Build kuadrant operator bundle image","text":"The make bundle
target accepts the following variables:
Makefile Variable Description Default value Notes IMG
Kuadrant operator image URL quay.io/kuadrant/kuadrant-operator:latest
TAG
var can be used to build this URL; defaults to latest if not provided VERSION
Bundle version 0.0.0
LIMITADOR_OPERATOR_BUNDLE_IMG
Limitador operator bundle URL quay.io/kuadrant/limitador-operator-bundle:latest
LIMITADOR_OPERATOR_VERSION
var could be used to build this, defaults to latest if not provided AUTHORINO_OPERATOR_BUNDLE_IMG
Authorino operator bundle URL quay.io/kuadrant/authorino-operator-bundle:latest
AUTHORINO_OPERATOR_VERSION
var could be used to build this, defaults to latest if not provided DNS_OPERATOR_BUNDLE_IMG
DNS operator bundle URL quay.io/kuadrant/dns-operator-bundle:latest
DNS_OPERATOR_BUNDLE_IMG
var could be used to build this, defaults to latest if not provided RELATED_IMAGE_WASMSHIM
WASM shim image URL oci://quay.io/kuadrant/wasm-shim:latest
WASM_SHIM_VERSION
var could be used to build this, defaults to latest if not provided - Build the bundle manifests
make bundle [IMG=quay.io/kuadrant/kuadrant-operator:latest] \\\n [VERSION=0.0.0] \\\n [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \\\n [RELATED_IMAGE_WASMSHIM=oci://quay.io/kuadrant/wasm-shim:latest]\n
- Build the bundle image from the manifests
Makefile Variable Description Default value BUNDLE_IMG
Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
- Push the bundle image to a registry
Makefile Variable Description Default value BUNDLE_IMG
Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
Frequently, you may need to build custom kuadrant bundle with the default (latest
) Limitador and Authorino bundles. These are the example commands to build the manifests, build the bundle image and push to the registry.
In the example, a new kuadrant operator bundle version 0.8.0
will be created that references the kuadrant operator image quay.io/kuadrant/kuadrant-operator:v0.5.0
and latest Limitador and Authorino bundles.
# manifests\nmake bundle IMG=quay.io/kuadrant/kuadrant-operator:v0.5.0 VERSION=0.8.0\n\n# bundle image\nmake bundle-build BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n\n# push bundle image\nmake bundle-push BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n
"},{"location":"kuadrant-operator/doc/development/#build-custom-catalog","title":"Build custom catalog","text":"The catalog's format will be File-based Catalog.
Make sure all the required bundles are pushed to the registry. It is required by the opm
tool.
The make catalog
target accepts the following variables:
Makefile Variable Description Default value BUNDLE_IMG
Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest
LIMITADOR_OPERATOR_BUNDLE_IMG
Limitador operator bundle URL quay.io/kuadrant/limitador-operator-bundle:latest
AUTHORINO_OPERATOR_BUNDLE_IMG
Authorino operator bundle URL quay.io/kuadrant/authorino-operator-bundle:latest
DNS_OPERATOR_BUNDLE_IMG
DNS operator bundle URL quay.io/kuadrant/dns-operator-bundle:latest
make catalog [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest] \\\n [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest]\n
- Build the catalog image from the manifests
Makefile Variable Description Default value CATALOG_IMG
Kuadrant operator catalog image URL quay.io/kuadrant/kuadrant-operator-catalog:latest
make catalog-build [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
- Push the catalog image to a registry
make catalog-push [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
You can try out your custom catalog image following the steps of the Deploy kuadrant operator using OLM section.
"},{"location":"kuadrant-operator/doc/development/#cleaning-up","title":"Cleaning up","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/development/#run-tests","title":"Run tests","text":""},{"location":"kuadrant-operator/doc/development/#unittests","title":"Unittests","text":"make test-unit\n
Optionally, add TEST_NAME
makefile variable to run specific test
make test-unit TEST_NAME=TestLimitIndexEquals\n
or even subtest
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
"},{"location":"kuadrant-operator/doc/development/#integration-tests","title":"Integration tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Run integration tests
make test-integration\n
"},{"location":"kuadrant-operator/doc/development/#all-tests","title":"All tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind and deploy kuadrant deps
make local-env-setup\n
Run all tests
make test\n
"},{"location":"kuadrant-operator/doc/development/#lint-tests","title":"Lint tests","text":"make run-lint\n
"},{"location":"kuadrant-operator/doc/development/#uninstall-kuadrant-crds","title":"(Un)Install Kuadrant CRDs","text":"You need an active session open to a kubernetes cluster.
Remove CRDs
make uninstall\n
"},{"location":"kuadrant-operator/doc/dns/","title":"Kuadrant DNS","text":"A Kuadrant DNSPolicy custom resource:
- Targets Gateway API networking resources Gateways to provide dns management by managing the lifecycle of dns records in external dns providers such as AWS Route53 and Google DNS.
"},{"location":"kuadrant-operator/doc/dns/#how-it-works","title":"How it works","text":"A DNSPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external DNS service. The needed dns names are gathered from the listener definitions and the IPAdresses | CNAME hosts are gathered from the status block of the gateway resource.
"},{"location":"kuadrant-operator/doc/dns/#the-dnspolicy-custom-resource","title":"The DNSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/dns/#overview","title":"Overview","text":"The DNSPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - DNS Routing Strategy (
spec.routingStrategy
) - LoadBalancing specification (
spec.loadBalancing
) - HealthCheck specification (
spec.healthCheck
)
"},{"location":"kuadrant-operator/doc/dns/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: my-dns-policy\nspec:\n # reference to an existing networking resource to attach the policy to\n # it can only be a Gateway API Gateway resource\n # it can only refer to objects in the same namespace as the DNSPolicy\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: mygateway\n\n # (optional) routing strategy to use when creating DNS records, defaults to `loadbalanced`\n # determines what DNS records are created in the DNS provider\n # check out Kuadrant RFC 0005 https://github.com/Kuadrant/architecture/blob/main/rfcs/0005-single-cluster-dnspolicy.md to learn more about the Routing Strategy field\n # One-of: simple, loadbalanced.\n routingStrategy: loadbalanced\n\n # (optional) loadbalancing specification\n # use it for providing the specification of how dns will be configured in order to provide balancing of load across multiple clusters when using the `loadbalanced` routing strategy\n # Primary use of this is for multi cluster deployments\n # check out Kuadrant RFC 0003 https://github.com/Kuadrant/architecture/blob/main/rfcs/0003-dns-policy.md to learn more about the options that can be used in this field\n loadBalancing:\n # (optional) weighted specification\n # use it to control the weight value applied to records\n weighted:\n # use it to change the weight of a record based on labels applied to the target meta resource i.e. Gateway in a single cluster context or ManagedCluster in multi cluster with OCM\n custom:\n\n - weight: 200\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n # (optional) weight value that will be applied to weighted dns records by default. Integer greater than 0 and no larger than the maximum value accepted by the target dns provider, defaults to `120` \n defaultWeight: 100\n # (optional) geo specification\n # use it to control the geo value applied to records \n geo:\n # (optional) default geo to be applied to records \n defaultGeo: IE\n\n # (optional) health check specification\n # health check probes with the following specification will be created for each DNS target\n healthCheck:\n allowInsecureCertificates: true\n endpoint: /\n expectedResponses:\n\n - 200\n - 201\n - 301\n failureThreshold: 5\n port: 443\n protocol: https\n
Check out the API reference for a full specification of the DNSPolicy CRD.
"},{"location":"kuadrant-operator/doc/dns/#using-the-dnspolicy","title":"Using the DNSPolicy","text":""},{"location":"kuadrant-operator/doc/dns/#dns-provider-and-managedzone-setup","title":"DNS Provider and ManagedZone Setup","text":"A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about dns providers, and what domains these dns providers are currently hosting. This is done through the creation of ManagedZones and dns provider secrets containing the credentials for the dns provider account.
If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net
:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: my-gw\nspec:\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: echo.apps.hcpapps.net\n port: 80\n protocol: HTTP\n
For the DNSPolicy to act upon that listener, a ManagedZone must exist for that hostname's domain.
apiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: apps.hcpapps.net\nspec:\n domainName: apps.hcpapps.net\n description: \"apps.hcpapps.net managed domain\"\n dnsProviderSecretRef:\n name: my-aws-credentials\n
The ManagedZone references a Secret containing the credentials for the external DNS provider service.
apiVersion: v1\nkind: Secret\nmetadata:\n name: my-aws-credentials\n namespace: <ManagedZone Namespace>\ndata:\n AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n AWS_REGION: <AWS_REGION>\n AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\ntype: kuadrant.io/aws\n
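As a sketch, the same Secret can be created imperatively (the namespace and secret name here are placeholders, and the credentials are assumed to be set as environment variables):
kubectl -n <ManagedZone Namespace> create secret generic my-aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_REGION=$AWS_REGION \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n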
"},{"location":"kuadrant-operator/doc/dns/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"When a DNSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a matching ManagedZone.
Target a Gateway by setting the spec.targetRef
field of the DNSPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: DNSPolicy\nmetadata:\n name: <DNSPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/dns/#dnsrecord-resource","title":"DNSRecord Resource","text":"The DNSPolicy will create a DNSRecord resource for each listener hostname with a suitable ManagedZone configured. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.
Given the following multi-cluster gateway status:
status:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-1/172.31.201.1\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-2/172.31.202.1\n listeners:\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-1.api\n supportedKinds: []\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-2.api\n supportedKinds: [] \n
A DNSPolicy targeting this gateway would create an appropriate DNSRecord based on the routing strategy selected.
"},{"location":"kuadrant-operator/doc/dns/#loadbalanced","title":"loadbalanced","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: echo.apps.hcpapps.net\n namespace: <Gateway Namespace>\nspec:\n endpoints:\n\n - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n managedZone:\n name: apps.hcpapps.net \n
After DNSRecord reconciliation, the listener hostname should be resolvable through DNS:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/dns/#simple","title":"simple","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: echo.apps.hcpapps.net\n namespace: <Gateway Namespace>\nspec:\n endpoints:\n\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n - 172.31.202.1\n managedZone:\n name: apps.hcpapps.net \n
After DNSRecord reconciliation, the listener hostname should be resolvable through DNS:
dig echo.apps.hcpapps.net +short\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/dns/#examples","title":"Examples","text":"Check out the following user guides for examples of using the Kuadrant DNSPolicy:
"},{"location":"kuadrant-operator/doc/dns/#known-limitations","title":"Known limitations","text":" - One Gateway can only be targeted by one DNSPolicy.
- DNSPolicies can only target Gateways defined within the same namespace as the DNSPolicy.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/","title":"DNS Health Checks","text":"DNS Health Checks are a tool provided by some DNS Providers for ensuring the availability and reliability of your DNS Records and only publishing DNS Records that resolve to healthy workloads. Kuadrant offers a powerful feature known as DNSPolicy, which allows you to configure these health checks for all the managed DNS endpoints created as a result of that policy. This guide provides a comprehensive overview of how to set up, utilize, and understand these DNS health checks.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#supported-providers","title":"Supported Providers","text":"we currently only support AWS Route53 DNS Health checks.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#configuration-of-health-checks","title":"Configuration of Health Checks","text":"To configure a DNS health check, you need to specify the healthCheck
section of the DNSPolicy, which includes important properties such as:
endpoint
: The path where the health checks take place, usually '/healthz' or similar. port
: The specific port for the connection to be checked. protocol
: The protocol being used, such as HTTP or HTTPS. failureThreshold
: The number of failures tolerated on this endpoint before the related DNS entry is removed.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing: simple\n healthCheck:\n endpoint: \"/health\"\n port: 443\n protocol: \"HTTPS\"\n failureThreshold: 5\n
This configuration sets up a DNS health check in AWS Route53 which will connect by HTTPS on port 443 and request the path /health.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#reviewing-the-status-of-health-checks","title":"Reviewing the status of Health Checks","text":"The DNS Record CR will show whether the health check has been created or not in the DNS Provider, and will also show any errors encountered when trying to create or update the health check configuration.
Viewing the status of an executing health check requires logging in to the Route53 console to see the current probe results.
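From the cluster side, the reported status can be inspected on the DNSRecord resources, as a sketch (the plural resource name and the record name here are illustrative):
kubectl get dnsrecords.kuadrant.io -A\nkubectl describe dnsrecord echo.apps.hcpapps.net -n <Gateway Namespace>\n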
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#reconfiguring-health-checks","title":"Reconfiguring Health Checks","text":"To reconfigure the health checks, update the HealthCheck section of the DNS Policy, this will be reflected into all the health checks created as a result of this policy.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#removing-health-checks","title":"Removing Health Checks","text":"To remove the health checks created in AWS, delete the healthcheck section of the DNS Policy. All health checks will be deleted automatically, if the DNS Policy is deleted.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#limitations","title":"Limitations","text":"As Route53 will only perform health checks on an IP address, currently do not create health checks on DNS Policies that target gateways with hostname addresses.
"},{"location":"kuadrant-operator/doc/dnshealthchecks/#other-providers","title":"Other Providers","text":"Although we intend to support integrating with the DNS Health checks provided by other DNS Providers in the future, we currently only support AWS Route53.
"},{"location":"kuadrant-operator/doc/logging/","title":"Logging","text":"The kuadrant operator outputs 3 levels of log messages: (from lowest to highest level)
debug
info
(default) error
info
logging is restricted to high-level information. Actions like creating, deleting or updating Kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.
Only debug
logging will include processing details.
To configure the desired log level, set the environment variable LOG_LEVEL
to one of the supported values listed above. Default log level is info
.
Apart from log level, the operator can output messages to the logs in 2 different formats:
production
(default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
development
: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}
To configure the desired log mode, set the environment variable LOG_MODE
to one of the supported values listed above. Default log mode is production
.
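As a minimal sketch, assuming the operator runs as a Deployment named kuadrant-operator-controller-manager in the kuadrant-system namespace (names may differ per installation), both variables can be set with kubectl:
kubectl set env deployment/kuadrant-operator-controller-manager -n kuadrant-system LOG_LEVEL=debug LOG_MODE=development\n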
"},{"location":"kuadrant-operator/doc/rate-limiting/","title":"Kuadrant Rate Limiting","text":"A Kuadrant RateLimitPolicy custom resource, often abbreviated \"RateLimitPolicy\":
- Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to rate limit.
- Supports targeting subsets (sections) of a network resource to apply the limits to.
- Abstracts the details of the underlying Rate Limit protocol and configuration resources, which have a much broader remit and surface area.
- Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/rate-limiting/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"Kuadrant's Rate Limit implementation relies on the Envoy's Rate Limit Service (RLS) protocol. The workflow per request goes:
- On incoming request, the gateway checks the matching rules for enforcing rate limits, as stated in the RateLimitPolicy custom resources and targeted Gateway API networking objects
- If the request matches, the gateway sends one RateLimitRequest to the external rate limiting service (\"Limitador\").
- The external rate limiting service responds with a RateLimitResponse back to the gateway with either an
OK
or OVER_LIMIT
response code (see the sketch below).
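For illustration only, the RLS exchange can be exercised directly against Limitador with grpcurl. This is a sketch under assumptions: the gRPC address and port, the availability of server reflection, and the domain and descriptor values are all hypothetical:
grpcurl -plaintext -d '{\"domain\": \"default/my-rlp\", \"descriptors\": [{\"entries\": [{\"key\": \"limit.my_limit__hash\", \"value\": \"1\"}]}]}' localhost:8081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit\n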
A RateLimitPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external rate limiting service.
"},{"location":"kuadrant-operator/doc/rate-limiting/#the-ratelimitpolicy-custom-resource","title":"The RateLimitPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#overview","title":"Overview","text":"The RateLimitPolicy
spec basically includes two parts:
- A reference to an existing Gateway API resource (
spec.targetRef
) - Limit definitions (
spec.limits
)
Each limit definition includes:
- A set of rate limits (
spec.limits.<limit-name>.rates[]
) - (Optional) A set of dynamic counter qualifiers (
spec.limits.<limit-name>.counters[]
) - (Optional) A set of route selectors, to further qualify the specific routing rules when to activate the limit (
spec.limits.<limit-name>.routeSelectors[]
) - (Optional) A set of additional dynamic conditions to activate the limit (
spec.limits.<limit-name>.when[]
)
The limit definitions (limits
) can be declared at the top level of the spec (with the semantics of defaults) or alternatively within explicit defaults
or overrides
blocks.
Check out Kuadrant RFC 0002 to learn more about the Well-known Attributes that can be used to define counter qualifiers (counters
) and conditions (when
)."},{"location":"kuadrant-operator/doc/rate-limiting/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: my-rate-limit-policy\nspec:\n # Reference to an existing networking resource to attach the policy to. REQUIRED.\n # It can be a Gateway API HTTPRoute or Gateway resource.\n # It can only refer to objects in the same namespace as the RateLimitPolicy.\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n\n # The limits definitions to apply to the network traffic routed through the targeted resource.\n # Equivalent to if otherwise declared within `defaults`.\n limits:\n \"my_limit\":\n # The rate limits associated with this limit definition. REQUIRED.\n # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: secod }`\n rates: [\u2026]\n\n # Counter qualifiers.\n # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.\n # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.username`.\n # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n counters: [\u2026]\n\n # Further qualification of the scpecific HTTPRouteRules within the targeted HTTPRoute that should trigger the limit.\n # Each element contains a HTTPRouteMatch object that will be used to select HTTPRouteRules that include at least one identical HTTPRouteMatch.\n # The HTTPRouteMatch part does not have to be fully identical, but the what's stated in the selector must be identically stated in the HTTPRouteRule.\n # Do not use it on RateLimitPolicies that target a Gateway.\n routeSelectors: [\u2026]\n\n # Additional dynamic conditions to trigger the limit.\n # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.\n # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n when: [\u2026]\n\n # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n # routes that lack a more specific policy attached to.\n # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.\n defaults:\n limits: {\u2026}\n\n # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n # thus also overriding any more specific policy occasionally attached to any of those routes.\n # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.\n overrides:\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#using-the-ratelimitpolicy","title":"Using the RateLimitPolicy","text":""},{"location":"kuadrant-operator/doc/rate-limiting/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"When a RateLimitPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs
field of the HTTPRoute.
The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets, by specifying the routeSelectors
field of the limit definition.
Target a HTTPRoute by setting the spec.targetRef
field of the RateLimitPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: <RateLimitPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: <HTTPRoute Name>\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"If a RateLimitPolicy targets a route defined for *.com
and another RateLimitPolicy targets another route for api.com
, the Kuadrant control plane will not merge these two RateLimitPolicies. Unless one of the policies declares an overrides set of limits, the control plane will configure the gateway to mimic the behavior of gateway implementations by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and limit definitions.
E.g., by default, a request coming for api.com
will be rate limited according to the rules from the RateLimitPolicy that targets the route for api.com
; while a request for other.com
will be rate limited with the rules from the RateLimitPolicy targeting the route for *.com
.
See more examples in Overlapping Gateway and HTTPRoute RateLimitPolicies.
"},{"location":"kuadrant-operator/doc/rate-limiting/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"A RateLimitPolicy that targets a Gateway can declare a block of defaults (spec.defaults
) or a block of overrides (spec.overrides
). As a standard, gateway policies that specify neither defaults nor overrides act as defaults.
When declaring defaults, a RateLimitPolicy which targets a Gateway will be enforced on all HTTP traffic hitting the gateway, unless a more specific RateLimitPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default RateLimitPolicy, as well as changes in the existing HTTPRoutes.
Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempts, such as by setting safe default limits on hostnames and hostname wildcards.
Conversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.
Target a Gateway by setting the spec.targetRef
field of the RateLimitPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: <RateLimitPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n defaults: # alternatively: `overrides`\n limits: {\u2026}\n
"},{"location":"kuadrant-operator/doc/rate-limiting/#overlapping-gateway-and-httproute-ratelimitpolicies","title":"Overlapping Gateway and HTTPRoute RateLimitPolicies","text":"Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.
Gateway RateLimitPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute RateLimitPolicy exists, in which case the HTTPRoute RateLimitPolicy prevails.
Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoutes and 2 Gateways without RateLimitPolicies attached):
- RateLimitPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy G (defaults) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 RateLimitPolicy A will be enforced - Request to
b.toystore.com
\u2192 RateLimitPolicy B will be enforced - Request to
other.toystore.com
\u2192 RateLimitPolicy W will be enforced - Request to
other.com
(suppose a route exists) \u2192 RateLimitPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
Gateway RateLimitPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute RateLimitPolicy.
Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoutes and 2 Gateways without RateLimitPolicies attached):
- RateLimitPolicy A \u2192 HTTPRoute A (
a.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy B \u2192 HTTPRoute B (
b.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy W \u2192 HTTPRoute W (
*.toystore.com
) \u2192 Gateway G (*.com
) - RateLimitPolicy G (overrides) \u2192 Gateway G (
*.com
)
Expected behavior:
- Request to
a.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
b.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
other.toystore.com
\u2192 RateLimitPolicy G will be enforced - Request to
other.com
(suppose a route exists) \u2192 RateLimitPolicy G will be enforced - Request to
yet-another.net
(suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
"},{"location":"kuadrant-operator/doc/rate-limiting/#limit-definition","title":"Limit definition","text":"A limit will be activated whenever a request comes in and the request matches:
- any of the route rules selected by the limit (via
routeSelectors
or implicit \"catch-all\" selector), and - all of the
when
conditions specified in the limit.
A limit can define:
- counters that are qualified based on dynamic values fetched from the request, or
- global counters (implicitly, when no qualified counter is specified)
A limit is composed of one or more rate limits.
E.g.
spec:\n limits:\n \"toystore-all\":\n rates:\n\n - limit: 5000\n duration: 1\n unit: second\n\n \"toystore-api-per-username\":\n rates:\n\n - limit: 100\n duration: 1\n unit: second\n - limit: 1000\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n hostnames:\n - api.toystore.com\n\n \"toystore-admin-unverified-users\":\n rates:\n\n - limit: 250\n duration: 1\n unit: second\n routeSelectors:\n hostnames:\n - admin.toystore.com\n when:\n - selector: auth.identity.email_verified\n operator: eq\n value: \"false\"\n
Request to Rate limits enforced api.toystore.com
100rps/username or 1000rpm/username (whatever happens first) admin.toystore.com
250rps other.toystore.com
5000rps"},{"location":"kuadrant-operator/doc/rate-limiting/#route-selectors","title":"Route selectors","text":"Route selectors allow targeting sections of a HTTPRoute, by specifying sets of HTTPRouteMatches and/or hostnames that make the policy controller look up within the HTTPRoute spec for compatible declarations, and select the corresponding HTTPRouteRules and hostnames, to then build conditions that activate the policy or policy rule.
Check out Route selectors for a full description, semantics and API reference.
"},{"location":"kuadrant-operator/doc/rate-limiting/#when-conditions","title":"when
conditions","text":"when
conditions can be used to scope a limit (i.e. to filter the traffic to which a limit definition applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules via routeSelectors
.
Use when
conditions to conditionally activate limits based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames
and spec.rules.matches
fields, or in general in RateLimitPolicies that target a Gateway.
The selectors within the when
conditions of a RateLimitPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.
"},{"location":"kuadrant-operator/doc/rate-limiting/#examples","title":"Examples","text":"Check out the following user guides for examples of rate limiting services with Kuadrant:
- Simple Rate Limiting for Application Developers
- Authenticated Rate Limiting for Application Developers
- Gateway Rate Limiting for Cluster Operators
- Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/rate-limiting/#known-limitations","title":"Known limitations","text":" - One HTTPRoute can only be targeted by one RateLimitPolicy.
- One Gateway can only be targeted by one RateLimitPolicy.
- RateLimitPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the RateLimitPolicy.
- 2+ RateLimitPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/rate-limiting/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"Kuadrant currently does not support multiple RateLimitPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames
field, as well as HTTPRoutes that specify a hostname identical to a hostname specified in a listener of one of the route's parent gateways, or HTTPRoutes that don't specify any hostname at all, thus inheriting the hostnames from the parent gateways. In any of these cases, a maximum of one RateLimitPolicy targeting any of those resources that specify identical hostnames is allowed.
Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.
This limitation is rooted in the underlying components configured by Kuadrant to implement its policies, and in the lack of information in the data plane regarding the exact route that is honored by the API gateway in cases of conflicting hostnames.
To exemplify one way this limitation can impact deployments, consider the following topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2 \u25b2\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 RateLimitPolicy \u2502 \u2502 RateLimitPolicy \u2502\n \u2502 (policy-1) \u2502 \u2502 (policy-2) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
In the example above, with the policy-1
resource created before policy-2
, policy-2
will be enforced on all requests to app.io/bar
while policy-1
will not be enforced at all. I.e. app.io/foo
will not be rate-limited. Nevertheless, both policies will report status condition as Enforced
.
Notice that the enforcement of policy-2 and the non-enforcement of policy-1 is the opposite of the behavior seen in the analogous problem with the Kuadrant AuthPolicy.
A different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with the expectation of setting default rules for the cases not covered by more specific policies. E.g.:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 RateLimitPolicy \u2502\n \u2502 \u2502 (policy-2) \u2502\n \u25bc \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 Gateway \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners: \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 \u2502 - host: *.io \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u2502 \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 HTTPRoute \u2502 \u2502 HTTPRoute \u2502\n\u2502 (route-a) \u2502 \u2502 (route-b) \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames: \u2502 \u2502 hostnames: \u2502\n\u2502 - app.io \u2502 \u2502 - app.io \u2502\n\u2502 rules: \u2502 \u2502 rules: \u2502\n\u2502 - matches: \u2502 \u2502 - matches: \u2502\n\u2502 - path: \u2502 \u2502 - path: \u2502\n\u2502 value: /foo \u2502 \u2502 value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 RateLimitPolicy \u2502\n \u2502 (policy-1) \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Once again, both policies will report status condition as Enforced
. However, in this case, only policy-1
will be enforced on requests to app.io/foo
, while policy-2
will not be enforced at all. I.e. app.io/bar
will not be rate-limited. This is the same behavior as the analogous problem with the Kuadrant AuthPolicy.
To avoid these problems, use different hostnames in each route.
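A minimal sketch of that recommendation, with each route declaring its own distinct hostname (the hostnames, route names and parent gateway name are illustrative):
apiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: route-a\nspec:\n parentRefs:\n - name: <Gateway Name>\n hostnames:\n - foo.app.io\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: /foo\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: route-b\nspec:\n parentRefs:\n - name: <Gateway Name>\n hostnames:\n - bar.app.io\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: /bar\n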
"},{"location":"kuadrant-operator/doc/rate-limiting/#implementation-details","title":"Implementation details","text":"Driven by limitations related to how Istio injects configuration in the filter chains of the ingress gateways, Kuadrant relies on Envoy's Wasm Network filter in the data plane, to manage the integration with rate limiting service (\"Limitador\"), instead of the Rate Limit filter.
Motivation: Multiple rate limit domains The first limitation comes from having only one filter chain per listener. This often leads to a single global rate limiting filter configuration per gateway, and therefore to a shared rate limit domain across applications and policies. Even though, in a rate limit filter, the triggering of rate limit calls, via actions to build so-called \"descriptors\", can be defined at the level of the virtual host and/or specific route rule, the overall rate limit configuration is only one, i.e., always the same rate limit domain for all calls to Limitador.
On the other hand, the possibility to configure and invoke the rate limit service for multiple domains depending on the context allows isolating groups of policy rules, as well as optimizing performance in the rate limit service, which can rely on the domain for indexation.
Motivation: Fine-grained matching rules A second limitation of configuring the rate limit filter via Istio, particularly from Gateway API resources, is that rate limit descriptors at the level of a specific HTTP route rule require \"named routes\" \u2013 defined only in an Istio VirtualService resource and referred to in an EnvoyFilter one. Because Gateway API HTTPRoute rules lack a \"name\" property1, and because Istio VirtualService resources are only ephemeral data structures handled by Istio in-memory in its implementation of gateway configuration for Gateway API, where the names of individual route rules are auto-generated and not referable by users in a policy23, rate limiting by attributes of the HTTP request (e.g., path, method, headers, etc.) would be very limited while depending only on Envoy's Rate Limit filter.
Motivated by the desire to support multiple rate limit domains per ingress gateway, as well as fine-grained HTTP route matching rules for rate limiting, Kuadrant implements a wasm-shim that handles the rules to invoke the rate limiting service, complying with Envoy's Rate Limit Service (RLS) protocol.
The wasm module integrates with the gateway in the data plane via the Wasm Network filter, and parses a configuration composed out of user-defined RateLimitPolicy resources by the Kuadrant control plane. The rate limiting service (\"Limitador\") remains an implementation of Envoy's RLS protocol, capable of being integrated directly via the Rate Limit extension or, by Kuadrant, via the wasm module for the Istio Gateway API implementation.
As a consequence of this design:
- Users can define fine-grained rate limit rules that match their Gateway and HTTPRoute definitions including for subsections of these.
- Rate limit definitions are insulated, not leaking across unrelated policies or applications.
- Conditions to activate limits are evaluated in the context of the gateway process, reducing the gRPC calls to the external rate limiting service only to the cases where rate limit counters are known in advance to have to be checked/incremented.
- The rate limiting service can rely on the indexation to look up groups of limit definitions and counters.
- Components remain compliant with industry protocols and flexible for different integration options.
A Kuadrant wasm-shim configuration for 2 RateLimitPolicy custom resources (a Gateway default RateLimitPolicy and a HTTPRoute RateLimitPolicy) looks like the following. It is generated automatically by the Kuadrant control plane:
apiVersion: extensions.istio.io/v1alpha1\nkind: WasmPlugin\nmetadata:\n name: kuadrant-istio-ingressgateway\n namespace: istio-system\n \u2026\nspec:\n phase: STATS\n pluginConfig:\n failureMode: deny\n rateLimitPolicies:\n\n - domain: istio-system/gw-rlp # allows isolating policy rules and improve performance of the rate limit service\n hostnames:\n - '*.website'\n - '*.io'\n name: istio-system/gw-rlp\n rules: # match rules from the gateway and according to conditions specified in the policy\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /\n data:\n - static: # tells which rate limit definitions and counters to activate\n key: limit.internet_traffic_all__593de456\n value: \"1\"\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /\n - operator: endswith\n selector: request.host\n value: .io\n data:\n - static:\n key: limit.internet_traffic_apis_per_host__a2b149d2\n value: \"1\"\n - selector:\n selector: request.host\n service: kuadrant-rate-limiting-service\n - domain: default/app-rlp\n hostnames:\n - '*.toystore.website'\n - '*.toystore.io'\n name: default/app-rlp\n rules: # matches rules from a httproute and additional specified in the policy\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /assets/\n data:\n - static:\n key: limit.toystore_assets_all_domains__8cfb7371\n value: \"1\"\n - conditions:\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /v1/\n - operator: eq\n selector: request.method\n value: GET\n - operator: endswith\n selector: request.host\n value: .toystore.website\n - operator: eq\n selector: auth.identity.username\n value: \"\"\n - allOf:\n - operator: startswith\n selector: request.url_path\n value: /v1/\n - operator: eq\n selector: request.method\n value: POST\n - operator: endswith\n selector: request.host\n value: .toystore.website\n - operator: eq\n selector: auth.identity.username\n value: \"\"\n data:\n - static:\n key: limit.toystore_v1_website_unauthenticated__3f9c40c6\n value: \"1\"\n service: kuadrant-rate-limiting-service\n selector:\n matchLabels:\n istio.io/gateway-name: istio-ingressgateway\n url: oci://quay.io/kuadrant/wasm-shim:v0.3.0\n
-
https://github.com/kubernetes-sigs/gateway-api/pull/996\u00a0\u21a9
-
https://github.com/istio/istio/issues/36790\u00a0\u21a9
-
https://github.com/istio/istio/issues/37346\u00a0\u21a9
"},{"location":"kuadrant-operator/doc/tls/","title":"TLS","text":"A Kuadrant TLSPolicy custom resource:
- Targets Gateway API networking resources (Gateways) to provide TLS for gateway listeners by managing the lifecycle of TLS certificates using
CertManager
.
"},{"location":"kuadrant-operator/doc/tls/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/tls/#the-tlspolicy-custom-resource","title":"The TLSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/tls/#overview","title":"Overview","text":"The TLSPolicy
spec includes the following parts:
- A reference to an existing Gateway API resource (
spec.targetRef
)
"},{"location":"kuadrant-operator/doc/tls/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"apiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: my-tls-policy\nspec:\n # reference to an existing networking resource to attach the policy to\n # it can only be a Gateway API Gateway resource\n # it can only refer to objects in the same namespace as the TLSPolicy\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: mygateway\n
Check out the API reference for a full specification of the TLSPolicy CRD.
"},{"location":"kuadrant-operator/doc/tls/#using-the-tlspolicy","title":"Using the TLSPolicy","text":""},{"location":"kuadrant-operator/doc/tls/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"When a TLSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a valid TLS section.
Target a Gateway by setting the spec.targetRef
field of the TLSPolicy as follows:
apiVersion: kuadrant.io/v1beta2\nkind: TLSPolicy\nmetadata:\n name: <TLSPolicy name>\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/tls/#examples","title":"Examples","text":"Check out the following user guides for examples of using the Kuadrant TLSPolicy:
"},{"location":"kuadrant-operator/doc/tls/#known-limitations","title":"Known limitations","text":""},{"location":"kuadrant-operator/doc/install/install-openshift/","title":"Install Kuadrant on an OpenShift cluster","text":"NOTE: You must perform these steps on each OpenShift cluster that you want to use Kuadrant on.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#prerequisites","title":"Prerequisites","text":" - OpenShift Container Platform 4.14.x or later with community Operator catalog available.
- AWS account with Route 53 and a hosted zone.
- Accessible Redis instance.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#procedure","title":"Procedure","text":""},{"location":"kuadrant-operator/doc/install/install-openshift/#step-1-set-up-your-environment","title":"Step 1 - Set up your environment","text":"export AWS_ACCESS_KEY_ID=xxxxxxx # Key ID from AWS with Route 53 access\nexport AWS_SECRET_ACCESS_KEY=xxxxxxx # Access key from AWS with Route 53 access\nexport REDIS_URL=redis://user:xxxxxx@some-redis.com:10340 # A Redis cluster URL\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-2-install-gateway-api-v1","title":"Step 2 - Install Gateway API v1","text":"Before you can use Kuadrant, you must install Gateway API v1 as follows:
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-3-install-and-configure-istio-with-the-sail-operator","title":"Step 3 - Install and configure Istio with the Sail Operator","text":"Kuadrant integrates with Istio as a Gateway API provider. You can set up an Istio-based Gateway API provider by using the Sail Operator.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#install-istio","title":"Install Istio","text":"To install the Istio Gateway provider, run the following commands:
kubectl create ns istio-system\n
kubectl apply -f - <<EOF\nkind: OperatorGroup\napiVersion: operators.coreos.com/v1\nmetadata:\n name: sail\n namespace: istio-system\nspec: \n upgradeStrategy: Default \n--- \napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: sailoperator\n namespace: istio-system\nspec:\n channel: 3.0-dp1\n installPlanApproval: Automatic\n name: sailoperator\n source: community-operators\n sourceNamespace: openshift-marketplace\nEOF\n
Check the status of the installation as follows:
kubectl get installplan -n istio-system -o=jsonpath='{.items[0].status.phase}'\n
When ready, the status will change from installing
to complete
.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#configure-istio","title":"Configure Istio","text":"To configure the Istio Gateway API provider, run the following command:
kubectl apply -f - <<EOF\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n name: default\nspec:\n version: v1.21.0\n namespace: istio-system\n # Disable autoscaling to reduce dev resources\n values:\n pilot:\n autoscaleEnabled: false\nEOF\n
Wait for Istio to be ready as follows:
kubectl wait istio/default -n istio-system --for=\"condition=Ready=true\"\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-4-optional-configure-observability-and-metrics","title":"Step 4 - Optional: Configure observability and metrics","text":"Kuadrant provides a set of example dashboards that use known metrics exported by Kuadrant and Gateway components to provide insight into different components of your APIs and Gateways. While not essential, it is best to set up an OpenShift monitoring stack. This section provides links to OpenShift and Thanos documentation on configuring monitoring and metrics storage.
You can set up user-facing monitoring by following the steps in the OpenShift documentation on configuring the monitoring stack.
If you have user workload monitoring enabled, it is best to configure remote writes to a central storage system such as Thanos:
- OpenShift remote write configuration
- Kube Thanos
The example dashboards and alerts for observing Kuadrant functionality use low-level CPU metrics and network metrics available from the user monitoring stack in OpenShift. They also use resource state metrics from Gateway API and Kuadrant resources.
To scrape these additional metrics, you can install a kube-state-metrics instance
, with a custom resource configuration as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/kube-state-metrics.yaml\nkubectl apply -k https://github.com/Kuadrant/gateway-api-state-metrics?ref=main\n
To enable request metrics in Istio, you must create a telemetry
resource as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/config/observability/openshift/telemetry.yaml\n
If you have Grafana installed in your cluster, you can import the example dashboards and alerts.
For example installation details, see installing Grafana on OpenShift. When installed, you must add your Thanos instance as a data source to Grafana. Alternatively, if you are using only the user workload monitoring stack in your OpenShift cluster, and not writing metrics to an external Thanos instance, you can set up a data source to the thanos-querier route in the OpenShift cluster.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-5-create-secrets-for-your-credentials","title":"Step 5 - Create secrets for your credentials","text":"Before installing the Kuadrant Operator, you must enter the following commands to set up secrets that you will use later:
kubectl create ns kuadrant-system\n
Set up a CatalogSource
as follows:
kubectl apply -f - <<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n name: kuadrant-operator-catalog\n namespace: kuadrant-system\nspec:\n sourceType: grpc\n image: quay.io/kuadrant/kuadrant-operator-catalog:v0.8.0\n displayName: Kuadrant Operators\n publisher: grpc\n updateStrategy:\n registryPoll:\n interval: 45m\nEOF \n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#aws-route-53-credentials-for-tls","title":"AWS Route 53 credentials for TLS","text":"Set the AWS Route 53 credentials for TLS verification as follows:
kubectl -n kuadrant-system create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#redis-credentials-for-rate-limiting-counters","title":"Redis credentials for rate limiting counters","text":"Set the Redis credentials for shared multicluster counters for the Kuadrant Limitador component as follows:
kubectl -n kuadrant-system create secret generic redis-config \\\n --from-literal=URL=$REDIS_URL \n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#aws-route-53-credentials-for-dns","title":"AWS Route 53 credentials for DNS","text":"Set the AWS Route 53 credentials for managing DNS records as follows:
kubectl create ns ingress-gateway\n
kubectl -n ingress-gateway create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-6-install-the-kuadrant-operator","title":"Step 6 - Install the Kuadrant Operator","text":"To install the Kuadrant Operator, enter the following command:
kubectl apply -f - <<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: kuadrant-operator\n namespace: kuadrant-system\nspec:\n channel: stable\n installPlanApproval: Automatic\n name: kuadrant-operator\n source: kuadrant-operator-catalog\n sourceNamespace: kuadrant-system\n---\nkind: OperatorGroup\napiVersion: operators.coreos.com/v1\nmetadata:\n name: kuadrant\n namespace: kuadrant-system\nspec: \n upgradeStrategy: Default\nEOF\n
Wait for the Kuadrant Operators to be installed as follows:
kubectl get installplan -n kuadrant-system -o=jsonpath='{.items[0].status.phase}'\n
After some time, this command should return complete
.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#step-7-configure-kuadrant","title":"Step 7 - Configure Kuadrant","text":"To configure your Kuadrant deployment, enter the following command:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\n namespace: kuadrant-system\nspec:\n limitador:\n storage:\n redis-cached:\n configSecretRef:\n name: redis-config \nEOF \n
Wait for Kuadrant to be ready as follows:
kubectl wait kuadrant/kuadrant --for=\"condition=Ready=true\" -n kuadrant-system --timeout=300s\n
Kuadrant is now ready to use.
"},{"location":"kuadrant-operator/doc/install/install-openshift/#next-steps","title":"Next steps","text":" - Secure, protect, and connect APIs with Kuadrant on OpenShift
"},{"location":"kuadrant-operator/doc/observability/examples/","title":"Example Dashboards and Alerts","text":"Explore a variety of starting points for monitoring your Kuadrant installation with our examples folder. These dashboards and alerts are ready-to-use and easily customizable to fit your environment.
There are some example dashboards uploaded to Grafana.com . You can use the ID's listed below to import these dashboards into Grafana:
Name ID App Developer Dashboard 20970
Business User Dashboard 20981
Platform Engineer Dashboard 20982
"},{"location":"kuadrant-operator/doc/observability/examples/#dashboards","title":"Dashboards","text":""},{"location":"kuadrant-operator/doc/observability/examples/#importing-dashboards-into-grafana","title":"Importing Dashboards into Grafana","text":"For more details on how to import dashboards into Grafana, visit the import dashboards page.
- UI Method:
- JSON - Use the 'Import' feature in the Grafana UI to upload dashboard JSON files directly.
- ID - Use the 'Import' feature in the Grafana UI to import via Grafana.com using a Dashboard ID.
- ConfigMap Method: Automate dashboard provisioning by adding files to a ConfigMap, which should be mounted at
/etc/grafana/provisioning/dashboards
.
Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.
"},{"location":"kuadrant-operator/doc/observability/examples/#alerts","title":"Alerts","text":""},{"location":"kuadrant-operator/doc/observability/examples/#setting-up-alerts-in-prometheus","title":"Setting Up Alerts in Prometheus","text":"Integrate alerts into Prometheus using a PrometheusRule
resource. Adjust alert thresholds to meet your specific operational needs.
Further information on the metrics used for these alerts can be found on the metrics page.
"},{"location":"kuadrant-operator/doc/observability/metrics/","title":"Metrics","text":"This is a reference page for some of the different metrics used in example dashboards and alerts. It is not an exhaustive list. The documentation for each component may provide more details on a per-component basis. Some of the metrics are sourced from components outside the Kuadrant project, for example, Envoy. The value of this reference is showing some of the more widely desired metrics, and how to join the metrics from different sources together in a meaningful way.
"},{"location":"kuadrant-operator/doc/observability/metrics/#metrics-sources","title":"Metrics sources","text":" - Kuadrant components
- Istio
- Envoy
- Kube State Metrics
- Gateway API State Metrics
- Kubernetes metrics
"},{"location":"kuadrant-operator/doc/observability/metrics/#resource-usage-metrics","title":"Resource usage metrics","text":"Resource metrics, like CPU, memory and disk usage, primarily come from the Kubernetes metrics components. These include container_cpu_usage_seconds_total
, container_memory_working_set_bytes
and kubelet_volume_stats_used_bytes
. A stable list of metrics is maintained in the Kubernetes repository. These low-level metrics typically have a set of recording rules that aggregate values by labels and time ranges. For example, node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
or namespace_workload_pod:kube_pod_owner:relabel
. If you have deployed the kube-prometheus project, you should have the majority of these metrics being scraped.
"},{"location":"kuadrant-operator/doc/observability/metrics/#networking-metrics","title":"Networking metrics","text":"Low-level networking metrics like container_network_receive_bytes_total
are also available from the Kubernetes metrics components. HTTP & GRPC traffic metrics with higher level labels are available from Istio. One of the main metrics would be istio_requests_total
, which is a counter incremented for every request handled by an Istio proxy. Latency metrics are available via the istio_request_duration_milliseconds
metric, with buckets for varying response times.
Some example dashboards have panels that make use of the request URL path. The path is not added as a label to Istio metrics by default, as it has the potential to increase metric cardinality, and thus storage requirements. If you want to make use of the path in your queries or visualisations, you can enable the request path metric via the Telemetry resource in istio:
apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n name: namespace-metrics\n namespace: istio-system\nspec:\n metrics:\n\n - providers:\n - name: prometheus\n overrides:\n - match:\n metric: REQUEST_COUNT\n tagOverrides:\n request_url_path:\n value: \"request.url_path\"\n - match: \n metric: REQUEST_DURATION\n tagOverrides:\n request_url_path:\n value: \"request.url_path\"\n
"},{"location":"kuadrant-operator/doc/observability/metrics/#state-metrics","title":"State metrics","text":"The kube-state-metrics project exposes the state of various kuberenetes resources as metrics and labels. For example, the ready status
of a Pod
is available as kube_pod_status_ready
, with labels for the pod name
and namespace
. This can be useful for linking lower level container metrics back to a meaningful resource in the Kubernetes world.
"},{"location":"kuadrant-operator/doc/observability/metrics/#joining-metrics","title":"Joining metrics","text":"Metric queries can be as simple as just the name of the metric, or can be complex with joining & grouping. A lot of the time it can be useful to tie back low level metrics to more meaningful Kubernetes resources. For example, if the memory usage is maxed out on a container and that container is constantly being OOMKilled, it can be useful to get the Deployment and Namespace of that container for debugging. Prometheus query language (or promql) allows vector matching or results (sometimes called joining).
When using Gateway API and Kuadrant resources like HTTPRoute and RateLimitPolicy, the state metrics can be joined to Istio metrics to give a meaningful result set. Here's an example that queries the number of requests per second, and includes the name of the HTTPRoute that the traffic is for.
sum(\n rate(\n istio_requests_total{}[5m]\n )\n) by (destination_service_name)\n\n\n* on(destination_service_name) group_right \n label_replace(gatewayapi_httproute_labels{}, \\\"destination_service_name\\\", \\\"$1\\\",\\\"service\\\", \\\"(.+)\\\")\n
Breaking this query down, there are 2 parts. The first part is getting the rate of requests hitting the Istio gateway, aggregated to 5m intervals:
sum(\n rate(\n destination_service_name{}[5m]\n )\n) by (destination_service_name)\n
The result set here will include a label for the destination service name (i.e. the Service in Kubernetes). This label is key to looking up the HTTPRoute this traffic belongs to.
The 2nd part of the query uses the gatewayapi_httproute_labels
metric and the label_replace
function. The gatewayapi_httproute_labels
metric gives a list of all httproutes, including any labels on them. The HTTPRoute in this example has a label called 'service', set to be the same as the Istio service name. This allows us to join the two result sets. However, because the label doesn't match exactly (destination_service_name
and service
), we can replace the label so that it does match. That's what the label_replace
does.
label_replace(gatewayapi_httproute_labels{}, \"destination_service_name\", \"$1\", \"service\", \"(.+)\")\n
The two parts are joined together using vector matching.
* on(destination_service_name) group_right \n
*
is the binary operator, i.e. multiplication (which gives join-like behaviour) on()
specifies which labels to \"join\" the two results with group_right
enables one-to-many matching.
See the Prometheus documentation for further details on matching.
"},{"location":"kuadrant-operator/doc/observability/tracing/","title":"Enabling tracing with a central collector","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#introduction","title":"Introduction","text":"This guide outlines the steps to enable tracing in Istio and Kuadrant components (Authorino and Limitador), directing traces to a central collector for improved observability and troubleshooting. We'll also explore a typical troubleshooting flow using traces and logs.
"},{"location":"kuadrant-operator/doc/observability/tracing/#prerequisites","title":"Prerequisites","text":" - A Kubernetes cluster with Istio and Kuadrant installed.
- A trace collector (e.g., Jaeger or Tempo) configured to support OpenTelemetry (OTel).
"},{"location":"kuadrant-operator/doc/observability/tracing/#configuration-steps","title":"Configuration Steps","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#istio-tracing-configuration","title":"Istio Tracing Configuration","text":"Enable tracing in Istio by using the Telemetry API. Depending on your method for installing Istio, you will need to configure a tracing extensionProvider
in your MeshConfig, Istio or IstioOperator resource as well. Here is an example Telemetry and Istio config to sample 100% of requests, if using the Istio Sail Operator.
apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n name: mesh-default\n namespace: istio-system\nspec:\n tracing:\n\n - providers:\n - name: tempo-otlp\n randomSamplingPercentage: 100\n---\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n name: default\nspec:\n namespace: istio-system\n values:\n meshConfig:\n defaultConfig:\n tracing: {}\n enableTracing: true\n extensionProviders:\n - name: tempo-otlp\n opentelemetry:\n port: 4317\n service: tempo.tempo.svc.cluster.local\n
"},{"location":"kuadrant-operator/doc/observability/tracing/#kuadrant-tracing-configuration","title":"Kuadrant Tracing Configuration","text":"The Authorino and Limitador components have request tracing capabilities. Here is an example configuration to enable and send traces to a central collector. Ensure the collector is the same one that Istio is sending traces so that they can be correlated later.
apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n tracing:\n endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n insecure: true\n---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador\nspec:\n tracing:\n endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n
Once the changes are applied, the authorino and limitador components will be redeployed with tracing enabled.
Note:
There are plans to consolidate the tracing configuration to a single location, i.e. the Kuadrant CR. This will eventually eliminate the need to configure tracing in both the Authorino and Limitador CRs.
Important:
Currently, trace IDs do not propagate to wasm modules in Istio/Envoy, affecting trace continuity in Limitador. This means that requests passed to limitador will not have the relevant 'parent' trace ID in their trace information. If, however, the trace initiation point is outside of Envoy/Istio, the 'parent' trace ID will be available to limitador and included in traces passed to the collector. This has an impact on correlating traces from limitador with traces from authorino, the gateway and any other components in the path of requests.
"},{"location":"kuadrant-operator/doc/observability/tracing/#troubleshooting-flow-using-traces-and-logs","title":"Troubleshooting Flow Using Traces and Logs","text":"Using a tracing interface like the Jaeger UI or Grafana, you can search for trace information by the trace ID. You may get the trace ID from logs, or from a header in a sample request you want to troubleshoot. You can also search for recent traces, filtering by the service you want to focus on.
Here is an example trace in the Grafana UI showing the total request time from the gateway (Istio), the time to check the current rate limit count (and update it) in limitador and the time to check auth in Authorino:
In limitador, it is possible to enable request logging with trace IDs to get more information on requests. This requires the log level to be increased to at least debug, so the verbosity must be set to 3 or higher in the Limitador CR. For example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador\nspec:\n verbosity: 3\n
A log entry will look something like this, with the traceparent
field holding the trace ID:
\"Request received: Request { metadata: MetadataMap { headers: {\"te\": \"trailers\", \"grpc-timeout\": \"5000m\", \"content-type\": \"application/grpc\", \"traceparent\": \"00-4a2a933a23df267aed612f4694b32141-00f067aa0ba902b7-01\", \"x-envoy-internal\": \"true\", \"x-envoy-expected-rq-timeout-ms\": \"5000\"} }, message: RateLimitRequest { domain: \"default/toystore\", descriptors: [RateLimitDescriptor { entries: [Entry { key: \"limit.general_user__f5646550\", value: \"1\" }, Entry { key: \"metadata.filter_metadata.envoy\\\\.filters\\\\.http\\\\.ext_authz.identity.userid\", value: \"alice\" }], limit: None }], hits_addend: 1 }, extensions: Extensions }\"\n
If you centrally aggregate logs using something like promtail and loki, you can jump between trace information and the relevant logs for that service:
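For example, a minimal LogQL sketch that finds limitador log lines containing the trace ID from the example log entry above (the container label value is an assumption about how your logs are labelled):
{container=\"limitador\"} |= \"4a2a933a23df267aed612f4694b32141\"\n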
Using a combination of tracing and logs, you can visualise and troubleshoot request timing issues and drill down to specific services. This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users' traffic.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/","title":"RLP can target a Gateway resource","text":"Previous version: https://hackmd.io/IKEYD6NrSzuGQG1nVhwbcw
Based on: https://hackmd.io/_1k6eLCNR2eb9RoSzOZetg
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#introduction","title":"Introduction","text":"The current RateLimitPolicy CRD already implements a targetRef
with a reference to Gateway API's HTTPRoute. This doc captures the design and some implementation details of allowing the targetRef
to reference a Gateway API's Gateway.
With this HTTPRoute - Gateway hierarchy in place, we are also considering applying Policy Attachment's defaults/overrides approach to the RateLimitPolicy CRD. For now, though, this proposal is only about targeting the Gateway resource.
While designing Kuadrant's rate limiting and considering Istio/Envoy's rate limiting offering, we hit two limitations (described here). Therefore, without giving up entirely on Envoy's existing RateLimit Filter, we decided to leverage Envoy's Wasm Network Filter and implement a rate limiting wasm-shim module compliant with Envoy's Rate Limit Service (RLS). This wasm-shim module accepts a PluginConfig struct as its input configuration object.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#use-cases-targeting-a-gateway","title":"Use Cases targeting a gateway","text":"A key use case is being able to provide governance over what service providers can and cannot do when exposing a service via a shared ingress gateway. As well as providing certainty that no service is exposed without my ability as a cluster administrator to protect my infrastructure from unplanned load from badly behaving clients etc.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#goals","title":"Goals","text":"The goal of this document is to define:
- The schema of this
PluginConfig
struct. - The kuadrant-operator behavior filling the
PluginConfig
struct having as input the RateLimitPolicy k8s objects - The behavior of the wasm-shim having the
PluginConfig
struct as input.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"Kuadrant's rate limit relies on the Rate Limit Service (RLS) protocol, hence the gateway generates, based on a set of actions, a set of descriptors (one descriptor is a set of descriptor entries). Those descriptors are send to the external rate limit service provider. When multiple descriptors are provided, the external service provider will limit on ALL of them and return an OVER_LIMIT response if any of them are over limit.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-crd-of-the-ratelimitpolicy","title":"Schema (CRD) of the RateLimitPolicy","text":"---\napiVersion: kuadrant.io/v1beta1\nkind: RateLimitPolicy\nmetadata:\n name: my-rate-limit-policy\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute / Gateway\n name: myroute / mygateway\n rateLimits:\n\n - rules:\n - paths: [\"/admin/*\"]\n methods: [\"GET\"]\n hosts: [\"example.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"yes\"\n limits:\n - conditions: [\"admin == yes\"]\n max_value: 500\n seconds: 30\n variables: []\n
.spec.rateLimits
holds a list of rate limit configurations represented by the object RateLimit
. Each RateLimit
object represents a complete rate limit configuration. It contains three fields:
-
rules
(optional): Rules allow matching hosts
and/or methods
and/or paths
. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.
-
configurations
(required): Specifies a set of rate limit configurations that could be applied. The rate limit configuration object is the equivalent of the config.route.v3.RateLimit envoy object. One configuration is, in turn, a list of rate limit actions. Each action populates a descriptor entry. A vector of descriptor entries compose a descriptor. Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor. These rate limiting configuration rules provide flexibility to produce multiple descriptors. For example, you may want to define one generic rate limit descriptor and another descriptor depending on some header. If the header does not exist, the second descriptor is not generated, but traffic keeps being rate limited based on the generic descriptor.
configurations:\n\n - actions:\n - request_headers:\n header_name: \"X-MY-CUSTOM-HEADER\"\n descriptor_key: \"custom-header\"\n skip_if_absent: true\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
limits
(optional): configuration of the rate limiting service (Limitador). Check out limitador documentation for more information about the fields of each Limit
object.
Note: No namespace
/domain
defined. The Kuadrant operator will figure these out.
Note: There is no PREAUTH
, POSTAUTH
stage defined. The rate limiting filter should be placed after the authorization filter to enable authenticated rate limiting. In the future, stage
can be implemented.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#kuadrant-operators-behavior","title":"Kuadrant-operator's behavior","text":"One HTTPRoute can only be targeted by one rate limit policy.
Similarly, one Gateway can only be targeted by one rate limit policy.
However, indirectly, one gateway will be affected by multiple rate limit policies. By design of the Gateway API, one gateway can be referenced by multiple HTTPRoute objects. Furthermore, one HTTPRoute can reference multiple gateways.
The kuadrant operator will aggregate all the rate limit policies that apply to each gateway, including RLPs targeting HTTPRoutes and RLPs targeting Gateways.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#virtualhosting-ratelimitpolicies","title":"\"VirtualHosting\" RateLimitPolicies","text":"Rate limit policies are scoped by the domains defined at the referenced HTTPRoute's hostnames and Gateway's Listener's Hostname.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#multiple-httproutes-with-the-same-hostname","title":"Multiple HTTPRoutes with the same hostname","text":"When there are multiple HTTPRoutes with the same hostname, HTTPRoutes are all admitted and envoy merge the routing configuration in the same virtualhost. In these cases, the control plane has to \"merge\" the rate limit configuration into a single entry for the wasm filter.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#overlapping-httproutes","title":"Overlapping HTTPRoutes","text":"If some RLP targets a route for *.com
and another RLP targets a different route for api.com
, the control plane does not do any merging. A request coming for api.com
will be rate limited with the rules from the RLP targeting the route api.com
. Also, a request coming for other.com
will be rate limited with the rules from the RLP targeting the route *.com
.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#examples","title":"examples","text":"RLP A -> HTTPRoute A (api.toystore.com
) -> Gateway G (*.com
)
RLP B -> HTTPRoute B (other.toystore.com
) -> Gateway G (*.com
)
RLP H -> HTTPRoute H (*.toystore.com
) -> Gateway G (*.com
)
RLP G -> Gateway G (*.com
)
Request 1 (api.toystore.com
) -> apply RLP A and RLP G
Request 2 (other.toystore.com
) -> apply RLP B and RLP G
Request 3 (unknown.toystore.com
) -> apply RLP H and RLP G
Request 4 (other.com
) -> apply RLP G
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-domain-limitador-namespace","title":"rate limit domain / limitador namespace","text":"The kuadrant operator will add domain
attribute of Envoy's Rate Limit Service (RLS). It will also add the namespace
attribute of Limitador's rate limit config. The operator will ensure that the associated actions and rate limits have a common domain/namespace.
The value of this domain/namespace is expected to be related to the virtualhost for which the rate limit applies.
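As a hypothetical sketch, the same value would then appear both in the wasm filter configuration and in Limitador's limit definitions (the names here are illustrative, reusing the limit fields from the schema above):
# wasm filter side: RLS domain sent along with the descriptors\ndomain: default/toystore\n---\n# Limitador side: namespace of the corresponding limit definition\n\n- namespace: default/toystore\n max_value: 500\n seconds: 30\n conditions: [\"admin == yes\"]\n variables: []\n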
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-of-the-wasm-filter-configuration-object-the-pluginconfig","title":"Schema of the WASM filter configuration object: the PluginConfig
","text":"Currently the PluginConfig looks like this:
# The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nratelimitpolicies:\n default/toystore: # rate limit policy {NAMESPACE/NAME}\n hosts: # HTTPRoute hostnames\n\n - '*.toystore.com'\n rules: # route level actions\n - operations:\n - paths:\n - /admin/toy\n methods:\n - POST\n - DELETE\n actions:\n - generic_key:\n descriptor_value: yes\n descriptor_key: admin\n global_actions: # virtualHost level actions\n - generic_key:\n descriptor_value: yes\n descriptor_key: vhaction\n upstream_cluster: rate-limit-cluster # Limitador address reference\n domain: toystore-app # RLS protocol domain value\n
Proposed new design for the WASM filter configuration object (PluginConfig
struct):
# The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nrate_limit_policies:\n\n - name: toystore\n rate_limit_domain: toystore-app\n upstream_cluster: rate-limit-cluster\n hostnames: [\"*.toystore.com\"]\n gateway_actions:\n - rules:\n - paths: [\"/admin/toy\"]\n methods: [\"GET\"]\n hosts: [\"pets.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
Update highlights:
- [minor]
rate_limit_policies
is a list instead of a map indexed by the name/namespace. - [major] no distinction between \"rules\" and global actions
- [major] more aligned with RLS: multiple descriptors structured by \"rate limit configurations\" with matching rules
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#wasm-shim","title":"WASM-SHIM","text":"WASM filter rate limit policies are not exactly the same as user managed RateLimitPolicy custom resources. The WASM filter rate limit policies is part of the internal configuration and therefore not exposed to the end user.
At the WASM filter level, there are no route level or gateway level rate limit policies. The rate limit policies in the wasm plugin configuration may not map 1:1 to user managed RateLimitPolicy custom resources. WASM rate limit policies have an internal logical name and a set of hostnames to activate it based on the incoming request\u2019s host header.
The WASM filter builds a tree based data structure holding the rate limit policies. The longest (sub)domain match is used to select the policy to be applied. Only one policy is being applied per invocation.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-configurations","title":"rate limit configurations","text":"The WASM filter configuration object contains a list of rate limit configurations to build a list of Envoy's RLS descriptors. These configurations are defined at
rate_limit_policies[*].gateway_actions[*].configurations\n
For example:
configurations:\n\n- actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
How to read the policy:
-
Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor.
-
Each policy configuration optionally has an associated set of rules to match. Rules allow matching hosts
and/or methods
and/or paths
. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.
-
Each configuration object defines a list of actions. Each action may (or may not) produce a descriptor entry (descriptor list item). If an action cannot append a descriptor entry, no descriptor is generated for the configuration.
Note: The external rate limit service will be called when the gateway_actions
object produces at least one non-empty descriptor.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#example","title":"example","text":"WASM filter rate limit policy for *.toystore.com
. I want some rate limit descriptors configuration only for api.toystore.com
and another set of descriptors for admin.toystore.com
. The wasm filter config would look like this:
failure_mode_deny: true\nrate_limit_policies:\n\n - name: toystore\n rate_limit_domain: toystore-app\n upstream_cluster: rate-limit-cluster\n hostnames: [\"*.toystore.com\"]\n gateway_actions:\n - configurations: # no rules. Applies to all *.toystore.com traffic\n - actions:\n - generic_key:\n descriptor_key: toystore-app\n descriptor_value: \"1\"\n - rules:\n - hosts: [\"api.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: api\n descriptor_value: \"1\"\n - rules:\n - hosts: [\"admin.toystore.com\"]\n configurations:\n - actions:\n - generic_key:\n descriptor_key: admin\n descriptor_value: \"1\"\n
- When a request for
api.toystore.com
hits the filter, the descriptors generated would be:
descriptor 1
(\"toystore-app\", \"1\")\n
descriptor 2 (\"api\", \"1\")\n
- When a request for
admin.toystore.com
hits the filter, the descriptors generated would be:
descriptor 1
(\"toystore-app\", \"1\")\n
descriptor 2 (\"admin\", \"1\")\n
- When a request for
other.toystore.com
hits the filter, the descriptors generated would be: descriptor 1 (\"toystore-app\", \"1\")\n
"},{"location":"kuadrant-operator/doc/reference/authpolicy/","title":"The AuthPolicy Custom Resource Definition (CRD)","text":" - AuthPolicy
- AuthPolicySpec
- AuthScheme
- AuthRuleCommon
- AuthenticationRule
- MetadataRule
- AuthorizationRule
- ResponseSpec
- SuccessResponseSpec
- SuccessResponseItem
- CallbackRule
- NamedPattern
- AuthPolicyCommonSpec
- AuthPolicyStatus
- ConditionSpec
"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicy","title":"AuthPolicy","text":"Field Type Required Description spec
AuthPolicySpec Yes The specification for AuthPolicy custom resource status
AuthPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicyspec","title":"AuthPolicySpec","text":"Field Type Required Description targetRef
PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to rules
AuthScheme No Implicit default authentication/authorization rules routeSelectors
[]RouteSelector No List of implicit default selectors of HTTPRouteRules whose matching rules activate the policy. At least one HTTPRouteRule must be selected to activate the policy. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the policy. Do not use it in policies targeting a Gateway. patterns
MapNamedPattern> No Implicit default named patterns of lists of selector
, operator
and value
tuples, to be reused in when
conditions and pattern-matching authorization rules. when
[]PatternExpressionOrRef No List of implicit default additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway. defaults
AuthPolicyCommonSpec No Explicit default definitions. This field is mutually exclusive with any of the implicit default definitions: spec.rules
, spec.routeSelectors
, spec.patterns
, spec.when
overrides
AuthPolicyCommonSpec No Atomic overrides definitions. This field is mutually exclusive with any of the implicit or explicit default definitions: spec.rules
, spec.routeSelectors
, spec.patterns
, spec.when
, spec.default
"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicycommonspec","title":"AuthPolicyCommonSpec","text":"Field Type Required Description rules
AuthScheme No Authentication/authorization rules routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the policy. At least one HTTPRouteRule must be selected to activate the policy. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the policy. Do not use it in policies targeting a Gateway. patterns
MapNamedPattern> No Named patterns of lists of selector
, operator
and value
tuples, to be reused in when
conditions and pattern-matching authorization rules. when
[]PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authscheme","title":"AuthScheme","text":"Field Type Required Description authentication
MapAuthenticationRule> No Authentication rules. At least one config MUST evaluate to a valid identity object for the auth request to be successful. If omitted or empty, anonymous access is assumed. metadata
MapMetadataRule> No Rules for fetching auth metadata from external sources. authorization
MapAuthorizationRule> No Authorization rules. All policies MUST allow access for the auth request be successful. response
ResponseSpec No Customizations to the response to the authorization request. Use it to set custom values for unauthenticated, unauthorized, and/or success access request. callbacks
MapCallbackRule> No Rules for post-authorization callback requests to external services. Triggered regardless of the result of the authorization request."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authrulecommon","title":"AuthRuleCommon","text":"Field Type Required Description routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the auth rule. At least one HTTPRouteRule must be selected to activate the auth rule. If omitted, the auth rule is activated at all requests where the policy is enforced. Do not use it in policies targeting a Gateway. when
[]PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the auth rule. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway. cache
Caching spec No Caching options for the resolved object returned when applying this auth rule. (Default: disabled) priority
Integer No Priority group of the auth rule. All rules in the same priority group are evaluated concurrently; consecutive priority groups are evaluated sequentially. (Default: 0
) metrics
Boolean No Whether the auth rule emits individual observability metrics. (Default: false
)"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authenticationrule","title":"AuthenticationRule","text":"Field Type Required Description apiKey
API Key authentication spec No Authentication based on API keys stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. kubernetesTokenReview
KubernetesTokenReview spec No Authentication by Kubernetes token review. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. jwt
JWT verification spec No Authentication based on JSON Web Tokens (JWT). Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. oauth2Introspection
OAuth2 Token Introspection spec No Authentication by OAuth2 token introspection. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. x509
X.509 authentication spec No Authentication based on client X.509 certificates. The certificates presented by the clients must be signed by a trusted CA whose certificates are stored in Kubernetes secrets. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. plain
Plain identity object spec No Identity object extracted from the context. Use this method when authentication is performed beforehand by a proxy and the resulting object passed to Authorino as JSON in the auth request. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. anonymous
Anonymous access No Anonymous access. Use one of: apiKey
, jwt
, oauth2Introspection
, kubernetesTokenReview
, x509
, plain
, anonymous
. credentials
Auth credentials spec No Customizations to where credentials are required to be passed in the request for authentication based on this auth rule. Defaults to HTTP Authorization header with prefix \"Bearer\". overrides
Identity extension spec No JSON overrides to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). defaults
Identity extension spec No JSON defaults to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#metadatarule","title":"MetadataRule","text":"Field Type Required Description http
HTTP GET/GET-by-POST external metadata spec No External source of auth metadata via HTTP request. Use one of: http
, userInfo
, uma
. userInfo
OIDC UserInfo spec No OpenID Connect UserInfo linked to an OIDC authentication rule declared in this same AuthPolicy. Use one of: http
, userInfo
, uma
. uma
UMA metadata spec No User-Managed Access (UMA) source of resource data. Use one of: http
, userInfo
, uma
. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authorizationrule","title":"AuthorizationRule","text":"Field Type Required Description patternMatching
Pattern-matching authorization spec No Pattern-matching authorization rules. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. opa
OPA authorization spec No Open Policy Agent (OPA) Rego policy. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. kubernetesSubjectAccessReview
Kubernetes SubjectAccessReview spec No Authorization by Kubernetes SubjectAccessReview. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. spicedb
SpiceDB authorization spec No Authorization decision delegated to external Authzed/SpiceDB server. Use one of: patternMatching
, opa
, kubernetesSubjectAccessReview
, spicedb
. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#responsespec","title":"ResponseSpec","text":"Field Type Required Description unauthenticated
Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthenticated. (Default: 401 Unauthorized
) unauthorized
Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthorized. (Default: 403 Forbidden
) success
SuccessResponseSpec No Response items to be included in the auth response when the request is authenticated and authorized."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponsespec","title":"SuccessResponseSpec","text":"Field Type Required Description headers
MapSuccessResponseItem> No Custom success response items wrapped as HTTP headers to be injected in the request. dynamicMetadata
MapSuccessResponseItem> No Custom success response items wrapped as Envoy Dynamic Metadata. Use it to pass data along to other proxy filters, such as the rate-limit filter."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponseitem","title":"SuccessResponseItem","text":"Field Type Required Description plain
Plain text response item No Plain text content. Use one of: plain
, json
, wristband
. json
JSON injection response item No Specification of a JSON object. Use one of: plain
, json
, wristband
. wristband
Festival Wristband token response item No Specification of a JSON object. Use one of: plain
, json
, wristband
. key
String No The key used to add the custom response item (name of the HTTP header or root property of the Dynamic Metadata object). Defaults to the name of the response item if omitted."},{"location":"kuadrant-operator/doc/reference/authpolicy/#callbackrule","title":"CallbackRule","text":"Field Type Required Description http
HTTP endpoints callback spec No HTTP endpoint settings to build the callback request (webhook). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#namedpattern","title":"NamedPattern","text":"Field Type Required Description selector
String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value
, using the operator
. operator
String Yes The binary operator to be applied to the resolved value specified by the selector. One of: eq
(equal to), neq
(not equal to), incl
(includes; for arrays), excl
(excludes; for arrays), matches
(regex). value
String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicystatus","title":"AuthPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/authpolicy/#conditionspec","title":"ConditionSpec","text":" - The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
- The message field is a human-readable message indicating details about the transition.
- The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
- The status field is a string, with possible values True, False, and Unknown.
- The type field is a string with the following possible values:
- Available: the resource has successfully configured;
Field Type Description type
String Condition Type status
String Status: True, False, Unknown reason
String Condition state reason message
String Condition state description lastTransitionTime
Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/dnspolicy/","title":"The DNSPolicy Custom Resource Definition (CRD)","text":" - DNSPolicy
- DNSPolicySpec
- HealthCheckSpec
- LoadBalancingSpec
- LoadBalancingWeighted
- CustomWeight
- LoadBalancingGeo
- DNSPolicyStatus
- HealthCheckStatus
"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicy","title":"DNSPolicy","text":"Field Type Required Description spec
DNSPolicySpec Yes The specification for DNSPolicy custom resource status
DNSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicyspec","title":"DNSPolicySpec","text":"Field Type Required Description targetRef
Gateway API PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to healthCheck
HealthCheckSpec No HealthCheck spec loadBalancing
LoadBalancingSpec Yes(loadbalanced only) LoadBalancing Spec, required when routingStrategy is \"loadbalanced\" routingStrategy
String (immutable) Yes Immutable! Routing Strategy to use, one of \"simple\" or \"loadbalanced\""},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description endpoint
String Yes Endpoint is the path to append to the host to reach the expected health check port
Number Yes Port to connect to the host on protocol
String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold
Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingspec","title":"LoadBalancingSpec","text":"Field Type Required Description weighted
LoadBalancingWeighted Yes Weighted routing spec geo
LoadBalancingGeo Yes Geo routing spec"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingweighted","title":"LoadBalancingWeighted","text":"Field Type Required Description defaultWeight
Number Yes Default weight to apply to created records custom
[]CustomWeight No Custom weights to manipulate records weights based on label selectors"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#customweight","title":"CustomWeight","text":"Field Type Description selector
metav1.LabelSelector Label Selector to specify resources that should have this weight applied weight
Number Weight value to apply for matching resources"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancinggeo","title":"LoadBalancingGeo","text":"Field Type Required Description defaultGeo
String Yes Default geo to apply to records"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicystatus","title":"DNSPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the resource. healthCheck
HealthCheckStatus HealthCheck status. recordConditions
[String][]Kubernetes meta/v1.Condition Status of individual DNSRecords owned by this policy."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/kuadrant/","title":"The Kuadrant Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrant","title":"kuadrant","text":"Note on Limitador The Kuadrant operator creates a Limitador CR named `limitador` in the same namespace as the Kuadrant CR. If there is a pre-existing Limitador CR of the same name the kuadrant operator will take ownership of that Limitador CR. Field Type Required Description spec
KuadrantSpec No The specification for Kuadrant custom resource. status
KuadrantStatus No The status for the custom resources."},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantspec","title":"KuadrantSpec","text":"Field Type Required Description limitador
Limitador No Configure limitador deployments."},{"location":"kuadrant-operator/doc/reference/kuadrant/#limitador","title":"Limitador","text":"Field Type Required Description affinity
Affinity No Describes the scheduling rules for limitador pods. replicas
Number No Sets the number of limitador replicas to deploy. resourceRequirements
ResourceRequirements No Set the resource requirements for limitador pods. pdb
PodDisruptionBudgetType No Configure allowed pod disruption budget fields. storage
Storage No Define backend storage option for limitador."},{"location":"kuadrant-operator/doc/reference/kuadrant/#poddisruptionbudgettype","title":"PodDisruptionBudgetType","text":"Field Type Required Description maxUnavailable
Number No An eviction is allowed if at most \"maxUnavailable\" limitador pods are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\". minAvailable
Number No An eviction is allowed if at least \"minAvailable\" limitador pods will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\"."},{"location":"kuadrant-operator/doc/reference/kuadrant/#storage","title":"Storage","text":"Field Type Required Description redis
Redis No Uses Redis to store limitador counters. redis-cached
RedisCached No Uses Redis to store limitador counters, with an in-memory cache disk
Disk No Counters are held on disk (persistent). Kubernetes Persistent Volumes will be used to store counters."},{"location":"kuadrant-operator/doc/reference/kuadrant/#redis","title":"Redis","text":"Field Type Required Description configSecretRef
LocalObjectReference No ConfigSecretRef refers to the secret holding the URL for Redis."},{"location":"kuadrant-operator/doc/reference/kuadrant/#rediscached","title":"RedisCached","text":"Field Type Required Description configSecretRef
LocalObjectReference No ConfigSecretRef refers to the secret holding the URL for Redis. options
Options No Configures a number of caching options for limitador."},{"location":"kuadrant-operator/doc/reference/kuadrant/#options","title":"Options","text":"Field Type Required Description ttl
Number No TTL for cached counters in milliseconds [default: 5000] ratio
Number No Ratio to apply to the TTL from Redis on cached counters [default: 10] flush-period
Number No FlushPeriod for counters in milliseconds [default: 1000] max-cached
Number No MaxCached refers to the maximum amount of counters cached [default: 10000]"},{"location":"kuadrant-operator/doc/reference/kuadrant/#disk","title":"Disk","text":"Field Type Required Description persistentVolumeClaim
PVCGenericSpec No Configure resources for PVC. Optimize
String No Defines optimization option of the disk persistence type. Valid options: \"throughput\", \"disk\""},{"location":"kuadrant-operator/doc/reference/kuadrant/#pvcgenericspec","title":"PVCGenericSpec","text":"Field Type Required Description storageClassName
String No Storage class name resources
PersistentVolumeClaimResources No Resources represent the minimum resources the volume should have volumeName
String No VolumeName is the binding reference to the PersistentVolume backing the claim"},{"location":"kuadrant-operator/doc/reference/kuadrant/#persistentvolumeclaimresources","title":"PersistentVolumeClaimResources","text":"Field Type Required Description requests
Quantity Yes Storage resource requests to be used on the persistentVolumeClaim"},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantstatus","title":"KuadrantStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/","title":"The RateLimitPolicy Custom Resource Definition (CRD)","text":" - RateLimitPolicy
- RateLimitPolicySpec
- RateLimitPolicyCommonSpec
- Limit
- RateLimit
- WhenCondition
- RateLimitPolicyStatus
- ConditionSpec
"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicy","title":"RateLimitPolicy","text":"Field Type Required Description spec
RateLimitPolicySpec Yes The specification for RateLimitPolicy custom resource status
RateLimitPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicyspec","title":"RateLimitPolicySpec","text":"Field Type Required Description targetRef
PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to defaults
RateLimitPolicyCommonSpec No Default limit definitions. This field is mutually exclusive with the limits
field overrides
RateLimitPolicyCommonSpec No Overrides limit definitions. This field is mutually exclusive with the limits
field and defaults
field. This field is only allowed for policies targeting Gateway
in targetRef.kind
limits
MapLimit> No Limit definitions. This field is mutually exclusive with the defaults
field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicycommonspec","title":"RateLimitPolicyCommonSpec","text":"Field Type Required Description limits
MapLimit> No Explicit Limit definitions. This field is mutually exclusive with RateLimitPolicySpec limits
field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#limit","title":"Limit","text":"Field Type Required Description rates
[]RateLimit No List of rate limits associated with the limit definition counters
[]String No List of rate limit counter qualifiers. Items must be a valid Well-known attribute. Each distinct value resolved in the data plane starts a separate counter for each rate limit. routeSelectors
[]RouteSelector No List of selectors of HTTPRouteRules whose matching rules activate the limit. At least one HTTPRouteRule must be selected to activate the limit. If omitted, all HTTPRouteRules of the targeted HTTPRoute activate the limit. Do not use it in policies targeting a Gateway. when
[]WhenCondition No List of additional dynamic conditions (expressions) to activate the limit. All expressions must evaluate to true for the limit to be applied. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames
and spec.rules.matches
fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimit","title":"RateLimit","text":"Field Type Required Description limit
Number Yes Maximum value allowed within the given period of time (duration) duration
Number Yes The period of time in the specified unit that the limit applies unit
String Yes Unit of time for the duration of the limit. One-of: \"second\", \"minute\", \"hour\", \"day\"."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#whencondition","title":"WhenCondition","text":"Field Type Required Description selector
String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value
, using the operator
. operator
String Yes The binary operator to be applied to the resolved value specified by the selector. One-of: \"eq\" (equal to), \"neq\" (not equal to) value
String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicystatus","title":"RateLimitPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#conditionspec","title":"ConditionSpec","text":" - The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
- The message field is a human-readable message indicating details about the transition.
- The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
- The status field is a string, with possible values True, False, and Unknown.
- The type field is a string with the following possible values:
- Available: the resource has successfully configured;
Field Type Description type
String Condition Type status
String Status: True, False, Unknown reason
String Condition state reason message
String Condition state description lastTransitionTime
Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/route-selectors/","title":"Route selectors","text":"The route selectors of a policy spec or policy rule (limit definition or auth rule) allow specifying selectors of routes or parts of a route that transitively induce a set of conditions for a policy or policy rule to be enforced. They are defined as a set of HTTP route matching rules, where these matching rules must exist, partially or identically stated, within the HTTPRouteRules of the HTTPRoute that is targeted by the policy.
"},{"location":"kuadrant-operator/doc/reference/route-selectors/#the-routeselectors-field","title":"The routeSelectors
field","text":"The routeSelectors
field can be found in policy specs and policy rules (limit definition or auth rule).
Field Type Required Description routeSelectors
[]RouteSelector No List of route selectors of HTTPRouteRules whose HTTPRouteMatches activate the policy or policy rule."},{"location":"kuadrant-operator/doc/reference/route-selectors/#routeselector","title":"RouteSelector","text":"Each RouteSelector
is an object composed of a set of HTTPRouteMatch objects (from Gateway API), and an additional hostnames
field.
Field Type Required Description matches
[]HTTPRouteMatch No List of selectors of HTTPRouteRules whose matching rules activate the policy or policy rule hostnames
[]Hostname No List of hostnames of the HTTPRoute that activate the policy or policy rule"},{"location":"kuadrant-operator/doc/reference/route-selectors/#mechanics-of-the-route-selectors","title":"Mechanics of the route selectors","text":"Route selectors matches and the HTTPRoute's HTTPRouteMatches are pairwise compared to select or not select HTTPRouteRules that should activate a policy rule. To decide whether the route selector selects a HTTPRouteRule or not, for each pair of route selector HTTPRouteMatch and HTTPRoute HTTPRouteMatch:
- The route selector selects the HTTPRoute's HTTPRouteRule if the HTTPRouteRule contains at least one HTTPRouteMatch that specifies fields that are literally identical to all the fields specified by at least one HTTPRouteMatch of the route selector.
- A HTTPRouteMatch within a HTTPRouteRule may include other fields that are not specified in a route selector match, and yet the route selector match selects the HTTPRouteRule if all fields of the route selector match are identically included in the HTTPRouteRule's HTTPRouteMatch; the opposite is NOT true.
- Each field
path
of a HTTPRouteMatch, as well as each field method
of a HTTPRouteMatch, as well as each element of the fields headers
and queryParams
of a HTTPRouteMatch, is atomic \u2013 this is true for the HTTPRouteMatches within a HTTPRouteRule, as well as for HTTPRouteMatches of a route selector.
Additionally, at least one hostname specified in a route selector must identically match one of the hostnames specified (or inherited, when omitted) by the targeted HTTPRoute.
The semantics of the route selectors allows to assertively relate policy rule definitions to routing rules, with benefits for identifying the subsets of the network that are covered by a policy rule, while preventing unreachable definitions, as well as the overhead associated with the maintenance of such rules across multiple resources throughout time, according to network topology beneath. Moreover, the requirement of not having to be a full copy of the targeted HTTPRouteRule matches, but only partially identical, helps prevent repetition to some degree, as well as it enables to more easily define policy rules that scope across multiple HTTPRouteRules (by specifying less rules in the selector).
"},{"location":"kuadrant-operator/doc/reference/route-selectors/#golden-rules-and-corner-cases","title":"Golden rules and corner cases","text":"A few rules and corner cases to keep in mind while using the RLP's routeSelectors
:
- The golden rule \u2013 The route selectors in a policy or policy rule are not to be interpreted as the route matching rules that activate the policy or policy rule, but as selectors of the route rules that activate the policy or policy rule.
- Due to (1) above, this can lead to cases, e.g., where a route selector that states
matches: [{ method: POST }]
selects a HTTPRouteRule that defines matches: [{ method: POST }, { method: GET }]
, effectively causing the policy or policy rule to be activated on requests to the HTTP method POST
, but also to the HTTP method GET
. - The requirement for the route selector match to state patterns that are identical to the patterns stated by the HTTPRouteRule (partially or entirely) makes, e.g., a route selector such as
matches: { path: { type: PathPrefix, value: /foo } }
to select a HTTPRouteRule that defines matches: { path: { type: PathPrefix, value: /foo }, method: GET }
, but not to select a HTTPRouteRule that only defines matches: { method: GET }
, even though the latter includes technically all HTTP paths; nor it selects a HTTPRouteRule that only defines matches: { path: { type: Exact, value: /foo } }
, even though all requests to the exact path /foo
are also technically requests to /foo*
. - The atomicity property of fields of the route selectors makes, e.g., a route selector such as
matches: { path: { value: /foo } }
to select a HTTPRouteRule that defines matches: { path: { value: /foo } }
, but not to select a HTTPRouteRule that only defines matches: { path: { type: PathPrefix, value: /foo } }
. (This case may actually never happen because PathPrefix
is the default value for path.type
and will be set automatically by the Kubernetes API server.)
Due to the nature of route selectors of defining pointers to HTTPRouteRules, the routeSelectors
field is not supported in a RLP that targets a Gateway resource.
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/","title":"The TLSPolicy Custom Resource Definition (CRD)","text":" - TLSPolicy
- TLSPolicySpec
- TLSPolicyStatus
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicy","title":"TLSPolicy","text":"Field Type Required Description spec
TLSPolicySpec Yes The specification for TLSPolicy custom resource status
TLSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicyspec","title":"TLSPolicySpec","text":"Field Type Required Description targetRef
Gateway API PolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to issuerRef
CertManager meta/v1.ObjectReference Yes IssuerRef is a reference to the issuer for the created certificate commonName
String No CommonName is a common name to be used on the created certificate duration
Kubernetes meta/v1.Duration No The requested 'duration' (i.e. lifetime) of the created certificate. renewBefore
Kubernetes meta/v1.Duration No How long before the currently issued certificate's expiry cert-manager should renew the certificate. usages
[]CertManager v1.KeyUsage No Usages is the set of x509 usages that are requested for the certificate. Defaults to digital signature
and key encipherment
if not specified revisionHistoryLimit
Number No RevisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history privateKey
CertManager meta/v1.CertificatePrivateKey No Options to control private keys used for the Certificate IssuerRef certmanmetav1.ObjectReference
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicystatus","title":"TLSPolicyStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/","title":"Enforcing authentication & authorization with Kuadrant AuthPolicy","text":"This guide walks you through the process of setting up a local Kubernetes cluster with Kuadrant where you will protect Gateway API endpoints by declaring Kuadrant AuthPolicy custom resources.
Two AuthPolicies will be declared:
Use case AuthPolicy App developer 1 AuthPolicy targeting a HTTPRoute that routes traffic to a sample Toy Store application, and enforces API key authentication to all requests in this route, as well as requires API key owners to be mapped to groups:admins
metadata to access a specific HTTPRouteRule of the route. Platform engineer use-case 1 AuthPolicy targeting the istio-ingressgateway
Gateway that enforces a trivial \"deny-all\" policy that locks down any other HTTPRoute attached to the Gateway. Topology:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (AuthPolicy) \u2502\n \u2502 gw-auth \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u25bc\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Gateway) \u2502\n \u2502 istio-ingressgateway \u2502\n \u250c\u2500\u2500\u2500\u2500\u25ba\u2502 \u2502\u25c4\u2500\u2500\u2500\u2510\n \u2502 \u2502 * \u2502 \u2502\n \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n \u2502 \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (HTTPRoute) \u2502 \u2502 (HTTPRoute) \u2502\n \u2502 toystore \u2502 \u2502 other \u2502\n \u2502 \u2502 \u2502 \u2502\n \u2502 api.toystore.com \u2502 \u2502 *.other-apps.com \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (AuthPolicy) \u2502\n \u2502 toystore \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#requisites","title":"Requisites","text":" - Docker
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#run-the-guide-1-4","title":"Run the guide \u2460 \u2192 \u2463","text":""},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#1-setup-persona-cluster-admin","title":"\u2460 Setup (Persona: Cluster admin)","text":"Clone the repo:
git clone git@github.com:Kuadrant/kuadrant-operator.git && cd kuadrant-operator\n
Run the following command to create a local Kubernetes cluster with Kind, install & deploy Kuadrant:
make local-setup\n
Request an instance of Kuadrant in the kuadrant-system
namespace:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
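Optionally, verify that the instance reports ready before moving on (a minimal check; the Ready condition name is an assumption based on the kubectl wait conventions used later in this document):
kubectl wait kuadrant/kuadrant -n kuadrant-system --for=condition=Ready --timeout=300s\n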
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#2-deploy-the-toy-store-sample-application-persona-app-developer","title":"\u2461 Deploy the Toy Store sample application (Persona: App developer)","text":"kubectl apply -f examples/toystore/toystore.yaml\n\nkubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/cars\"\n - method: GET\n path:\n type: PathPrefix\n value: \"/dolls\"\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Send requests to the application unprotected:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/dolls -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#3-protect-the-toy-store-application-persona-app-developer","title":"\u2462 Protect the Toy Store application (Persona: App developer)","text":"Create the AuthPolicy to enforce the following auth rules:
- Authentication:
- All users must present a valid API key
- Authorization:
/admin*
routes require user mapped to the admins
group (kuadrant.io/groups=admins
annotation added to the Kubernetes API key Secret)
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-authn\":\n apiKey:\n selector: {}\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"only-admins\":\n opa:\n rego: |\n groups := split(object.get(input.auth.identity.metadata.annotations, \"kuadrant.io/groups\", \"\"), \",\")\n allow { groups[_] == \"admins\" }\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\nEOF\n
Create the API keys:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-regular-user\n labels:\n authorino.kuadrant.io/managed-by: authorino\nstringData:\n api_key: iamaregularuser\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-admin-user\n labels:\n authorino.kuadrant.io/managed-by: authorino\n annotations:\n kuadrant.io/groups: admins\nstringData:\n api_key: iamanadmin\ntype: Opaque\nEOF\n
Send requests to the application protected by Kuadrant:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 401 Unauthorized\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 403 Forbidden\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamanadmin' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
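The only-admins rule grants access based on the kuadrant.io/groups annotation of the matched API key Secret. To inspect the metadata being evaluated (a quick check against the Secret created above):
kubectl get secret api-key-admin-user -o jsonpath='{.metadata.annotations.kuadrant\\.io/groups}'\n# admins\n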
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#4-create-a-default-deny-all-policy-at-the-level-of-the-gateway-persona-platform-engineer","title":"\u2463 Create a default \"deny-all\" policy at the level of the gateway (Persona: Platform engineer)","text":"Create the policy:
kubectl -n istio-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: gw-auth\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: istio-ingressgateway\n rules:\n authorization:\n deny-all:\n opa:\n rego: \"allow = false\"\n response:\n unauthorized:\n headers:\n \"content-type\":\n value: application/json\n body:\n value: |\n {\n \"error\": \"Forbidden\",\n \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n }\nEOF\n
The policy will not take effect until at least one accepted route attached to the gateway is not yet protected by another, more specific policy.
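One way to observe this is to watch the policy's Enforced status condition flip once a route is attached (a sketch, reusing the status condition checks that appear later in this document); run it before and after creating the route below:
kubectl get authpolicy gw-auth -n istio-system -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\n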
Create a route that will inherit the default policy attached to the gateway:
kubectl apply -f -<<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: other\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.other-apps.com\"\nEOF\n
Send requests to the route protected by the default policy set at the level of the gateway:
curl -H 'Host: foo.other-apps.com' http://$GATEWAY_URL/ -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/","title":"Authenticated Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure authenticated rate limiting for an application using Kuadrant.
Authenticated rate limiting limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:
- User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
- Each user can send up to 20rpm (\"requests per minute\").
- Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.
In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy
, to mimic an operation of reading toy records.
We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.
User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#run-the-steps-1-4","title":"Run the steps \u2460 \u2192 \u2463","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Create the deployment:
kubectl apply -f examples/toystore/toystore.yaml\n
Create an HTTPRoute to route traffic to the service via the Istio Ingress Gateway:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - path:\n type: Exact\n value: \"/toy\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Verify the route works:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#3-enforce-authentication-on-requests-to-the-toy-store-api","title":"\u2462 Enforce authentication on requests to the Toy Store API","text":"Create a Kuadrant AuthPolicy
to configure authentication:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels:\n app: toystore\n allNamespaces: true\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
Verify the authentication works by sending a request to the Toy Store API without an API key:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n
Create API keys for users alice
and bob
to authenticate:
Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.
kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: bob-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: bob\nstringData:\n api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: alice-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: alice\nstringData:\n api_key: IAMALICE\ntype: Opaque\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#4-enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2463 Enforce authenticated rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"alice-limit\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n \"bob-limit\":\n rates:\n - limit: 2\n duration: 10\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: bob\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
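To confirm the policy was accepted while you wait (a sketch based on the status condition checks used later in this document):
kubectl get ratelimitpolicy toystore -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n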
Verify the rate limiting works by sending requests as Alice and Bob.
Up to 5 successful (200 OK
) requests every 10 seconds allowed for Alice, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Up to 2 successful (200 OK
) requests every 10 seconds allowed for Bob, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/","title":"Authenticated Rate Limiting with JWTs and Kubernetes RBAC","text":"This user guide walks you through an example of how to use Kuadrant to protect an application with policies to enforce:
- authentication based on OpenID Connect (OIDC) ID tokens (signed JWTs), issued by a Keycloak server;
- an alternative authentication method using Kubernetes Service Account tokens;
- authorization delegated to the Kubernetes RBAC system;
- rate limiting by user ID.
In this example, we will protect a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request.
The API listens to requests at the hostnames *.toystore.com
, where it exposes the endpoints GET /toy*
, POST /admin/toy
and DELETE /admin/toy
, respectively, to mimic operations of reading, creating, and deleting toy records.
Any authenticated user/service account can send requests to the Toy Store API by providing either a valid Keycloak-issued access token or a Kubernetes token.
Privileges to execute the requested operation (read, create or delete) will be granted according to the following RBAC rules, stored in the Kubernetes authorization system:
Operation Endpoint Required role Read GET /toy*
toystore-reader
Create POST /admin/toy
toystore-writer
Delete DELETE /admin/toy
toystore-writer
Each user will be entitled to a maximum of 5rp10s (5 requests every 10 seconds).
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#requirements","title":"Requirements","text":" - Docker
- kubectl command-line tool
- jq
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#run-the-guide-1-6","title":"Run the guide \u2460 \u2192 \u2465","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#1-setup-a-cluster-with-kuadrant","title":"\u2460 Setup a cluster with Kuadrant","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Deploy the application in the default
namespace:
kubectl apply -f examples/toystore/toystore.yaml\n
Route traffic to the application:
kubectl apply -f examples/toystore/httproute.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#api-lifecycle","title":"API lifecycle","text":""},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-unprotected","title":"Try the API unprotected","text":"Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
It should return 200 OK
.
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#3-deploy-keycloak","title":"\u2462 Deploy Keycloak","text":"Create the namesapce:
kubectl create namespace keycloak\n
Deploy Keycloak with a bootstrap realm, users, and clients:
kubectl apply -n keycloak -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Note: The Keycloak server may take a couple of minutes to be ready.
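You can block until it is available, for example (assuming the manifest above creates a Deployment named keycloak):
kubectl -n keycloak wait --for=condition=Available deployment/keycloak --timeout=300s\n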
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#4-enforce-authentication-and-authorization-for-the-toy-store-api","title":"\u2463 Enforce authentication and authorization for the Toy Store API","text":"Create a Kuadrant AuthPolicy
to configure authentication and authorization:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore-protection\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"keycloak-users\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n \"k8s-service-accounts\":\n kubernetesTokenReview:\n audiences:\n\n - https://kubernetes.default.svc.cluster.local\n overrides:\n \"sub\":\n selector: auth.identity.user.username\n authorization:\n \"k8s-rbac\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.sub\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.sub\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-missing-authentication","title":"Try the API missing authentication","text":"curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-users\"\n# www-authenticate: Bearer realm=\"k8s-service-accounts\"\n# x-ext-auth-reason: {\"k8s-service-accounts\":\"credential not found\",\"keycloak-users\":\"credential not found\"}\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-without-permission","title":"Try the API without permission","text":"Obtain an access token with the Keycloak server:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
Send a request to the API as the Keycloak-authenticated user while still missing permissions:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
Create a Kubernetes Service Account to represent a consumer of the API associated with the alternative source of identities k8s-service-accounts
:
kubectl apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: client-app-1\nEOF\n
Obtain an access token for the client-app-1
service account:
SA_TOKEN=$(kubectl create token client-app-1)\n
Send a request to the API as the service account while still missing permissions:
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#5-grant-access-to-the-toy-store-api-for-user-and-service-account","title":"\u2464 Grant access to the Toy Store API for user and service account","text":"Create the toystore-reader
and toystore-writer
roles:
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: toystore-reader\nrules:\n\n- nonResourceURLs: [\"/toy*\"]\n verbs: [\"get\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: toystore-writer\nrules:\n- nonResourceURLs: [\"/admin/toy\"]\n verbs: [\"post\", \"delete\"]\nEOF\n
Add permissions to the user and service account:
User Kind Roles john User registered in Keycloak toystore-reader
, toystore-writer
client-app-1 Kubernetes Service Account toystore-reader
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: toystore-readers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: toystore-reader\nsubjects:\n\n- kind: User\n name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\n- kind: ServiceAccount\n name: client-app-1\n namespace: default\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: toystore-writers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: toystore-writer\nsubjects:\n- kind: User\n name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\nEOF\n
Q: Can I use Roles
and RoleBindings
instead of ClusterRoles
and ClusterRoleBindings
? Yes, you can.
The example above uses non-resource URL Kubernetes roles. To use Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings, and thus more flexible resource-based permissions to protect the API, see the spec for Kubernetes SubjectAccessReview authorization in the Authorino docs.
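As a rough, hypothetical sketch only (the Role/RoleBinding names and the services resource below are illustrative assumptions, and the AuthPolicy would additionally need its kubernetesSubjectAccessReview configured with resourceAttributes, per the Authorino docs): a namespaced Role grants resource-based permissions that such a SubjectAccessReview can check:
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: toystore-resource-reader # hypothetical name\n namespace: default\nrules:\n\n- apiGroups: [\"\"]\n resources: [\"services\"] # illustrative resource; adapt to your API\n verbs: [\"get\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: toystore-resource-readers # hypothetical name\n namespace: default\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: toystore-resource-reader\nsubjects:\n- kind: ServiceAccount\n name: client-app-1\n namespace: default\nEOF\n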
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-with-permission","title":"Try the API with permission","text":"Send requests to the API as the Keycloak-authenticated user:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 200 OK\n
Send requests to the API as the Kubernetes service account:
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#6-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2465 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"per-user\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n counters:\n - metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
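To check that the policy was accepted while you wait (a sketch using the status condition checks from later in this document):
kubectl get ratelimitpolicy toystore -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n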
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-rate-limited","title":"Try the API rate limited","text":"Each user should be entitled to a maximum of 5 requests every 10 seconds.
Note: If the tokens have expired, you may need to refresh them first.
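For example, re-issue both tokens with the same commands used earlier:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\nSA_TOKEN=$(kubectl create token client-app-1)\n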
Send requests as the Keycloak-authenticated user:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Send requests as the Kubernetes service account:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/","title":"Gateway DNS for Cluster Operators","text":"This user guide walks you through an example of how to configure DNS for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#requisites","title":"Requisites","text":" - Docker
- Route53 Hosted Zone
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#setup","title":"Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Create a namespace:
kubectl create namespace my-gateways\n
Export a root domain and hosted zone id:
export ROOT_DOMAIN=<ROOT_DOMAIN>\nexport AWS_HOSTED_ZONE_ID=<AWS_HOSTED_ZONE_ID>\n
Note: ROOT_DOMAIN and AWS_HOSTED_ZONE_ID should be set to your AWS hosted zone name and ID, respectively.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#create-a-managedzone","title":"Create a ManagedZone","text":"Create AWS credentials secret
export AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY>\n\nkubectl -n my-gateways create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
Create a ManagedZone:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: $ROOT_DOMAIN\nspec:\n id: $AWS_HOSTED_ZONE_ID\n domainName: $ROOT_DOMAIN\n description: \"my managed zone\"\n dnsProviderSecretRef:\n name: aws-credentials\nEOF\n
Check it's ready:
kubectl get managedzones -n my-gateways\n
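For a more precise readiness signal, you can also query the Ready condition directly (a sketch; the zone is named after your root domain in the manifest above):
kubectl get managedzone $ROOT_DOMAIN -n my-gateways -o=jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}'\n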
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"Create a gateway using your ROOT_DOMAIN as part of a listener hostname:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$ROOT_DOMAIN\"\n port: 80\n protocol: HTTP\nEOF\n
Check gateway status:
kubectl get gateway prod-web -n my-gateways\n
Response: NAME CLASS ADDRESS PROGRAMMED AGE\nprod-web istio 172.18.200.1 True 25s\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#enable-dns-on-the-gateway","title":"Enable DNS on the gateway","text":"Create a Kuadrant DNSPolicy
to configure DNS:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\nEOF\n
Check policy status:
kubectl get dnspolicy -o wide -n my-gateways\n
Response: NAME STATUS TARGETREFKIND TARGETREFNAME AGE\nprod-web Accepted Gateway prod-web 26s\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#deploy-a-sample-api-to-test-dns","title":"Deploy a sample API to test DNS","text":"Deploy the sample API:
kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n
Route traffic to the API from our gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: prod-web\n namespace: my-gateways\n hostnames:\n - \"*.$ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
Verify a DNSRecord resource is created:
kubectl get dnsrecords -n my-gateways\nNAME READY\nprod-web-api True\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#verify-dns-works-by-sending-requests","title":"Verify DNS works by sending requests","text":"Verify DNS using dig:
dig foo.$ROOT_DOMAIN +short\n
Response: 172.18.200.1\n
Verify DNS using curl:
curl http://api.$ROOT_DOMAIN\n
Response: {\n \"method\": \"GET\",\n \"path\": \"/\",\n \"query_string\": null,\n \"body\": \"\",\n \"headers\": {\n \"HTTP_HOST\": \"api.$ROOT_DOMAIN\",\n \"HTTP_USER_AGENT\": \"curl/7.85.0\",\n \"HTTP_ACCEPT\": \"*/*\",\n \"HTTP_X_FORWARDED_FOR\": \"10.244.0.1\",\n \"HTTP_X_FORWARDED_PROTO\": \"http\",\n \"HTTP_X_ENVOY_INTERNAL\": \"true\",\n \"HTTP_X_REQUEST_ID\": \"9353dd3d-0fe5-4404-86f4-a9732a9c119c\",\n \"HTTP_X_ENVOY_DECORATOR_OPERATION\": \"toystore.my-gateways.svc.cluster.local:80/*\",\n \"HTTP_X_ENVOY_PEER_METADATA\": \"ChQKDkFQUF9DT05UQUlORVJTEgIaAAoaCgpDTFVTVEVSX0lEEgwaCkt1YmVybmV0ZXMKHQoMSU5TVEFOQ0VfSVBTEg0aCzEwLjI0NC4wLjIyChkKDUlTVElPX1ZFUlNJT04SCBoGMS4xNy4yCtcBCgZMQUJFTFMSzAEqyQEKIwoVaXN0aW8uaW8vZ2F0ZXdheS1uYW1lEgoaCHByb2Qtd2ViChkKDGlzdGlvLmlvL3JldhIJGgdkZWZhdWx0CjMKH3NlcnZpY2UuaXN0aW8uaW8vY2Fub25pY2FsLW5hbWUSEBoOcHJvZC13ZWItaXN0aW8KLwojc2VydmljZS5pc3Rpby5pby9jYW5vbmljYWwtcmV2aXNpb24SCBoGbGF0ZXN0CiEKF3NpZGVjYXIuaXN0aW8uaW8vaW5qZWN0EgYaBHRydWUKGgoHTUVTSF9JRBIPGg1jbHVzdGVyLmxvY2FsCigKBE5BTUUSIBoecHJvZC13ZWItaXN0aW8tYzU0NWQ4ZjY4LTdjcjg2ChoKCU5BTUVTUEFDRRINGgtteS1nYXRld2F5cwpWCgVPV05FUhJNGktrdWJlcm5ldGVzOi8vYXBpcy9hcHBzL3YxL25hbWVzcGFjZXMvbXktZ2F0ZXdheXMvZGVwbG95bWVudHMvcHJvZC13ZWItaXN0aW8KFwoRUExBVEZPUk1fTUVUQURBVEESAioACiEKDVdPUktMT0FEX05BTUUSEBoOcHJvZC13ZWItaXN0aW8=\",\n \"HTTP_X_ENVOY_PEER_METADATA_ID\": \"router~10.244.0.22~prod-web-istio-c545d8f68-7cr86.my-gateways~my-gateways.svc.cluster.local\",\n \"HTTP_X_ENVOY_ATTEMPT_COUNT\": \"1\",\n \"HTTP_X_B3_TRACEID\": \"d65f580db9c6a50c471cdb534771c61a\",\n \"HTTP_X_B3_SPANID\": \"471cdb534771c61a\",\n \"HTTP_X_B3_SAMPLED\": \"0\",\n \"HTTP_VERSION\": \"HTTP/1.1\"\n },\n \"uuid\": \"0ecb9f84-db30-4289-a3b8-e22d4021122f\"\n}\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-dns/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/","title":"Gateway Rate Limiting for Cluster Operators","text":"This user guide walks you through an example of how to configure rate limiting for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#run-the-steps-1-5","title":"Run the steps \u2460 \u2192 \u2464","text":""},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#2-create-the-ingress-gateways","title":"\u2461 Create the ingress gateways","text":"kubectl -n istio-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: external\n annotations:\n kuadrant.io/namespace: kuadrant-system\n networking.istio.io/service-type: ClusterIP\nspec:\n gatewayClassName: istio\n listeners:\n\n - name: external\n port: 80\n protocol: HTTP\n hostname: '*.io'\n allowedRoutes:\n namespaces:\n from: All\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: internal\n annotations:\n kuadrant.io/namespace: kuadrant-system\n networking.istio.io/service-type: ClusterIP\nspec:\n gatewayClassName: istio\n listeners:\n - name: local\n port: 80\n protocol: HTTP\n hostname: '*.local'\n allowedRoutes:\n namespaces:\n from: All\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#3-enforce-rate-limiting-on-requests-incoming-through-the-external-gateway","title":"\u2462 Enforce rate limiting on requests incoming through the external
gateway","text":" \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Gateway) \u2502 \u2502 (Gateway) \u2502\n \u2502 external \u2502 \u2502 internal \u2502\n \u2502 \u2502 \u2502 \u2502\n \u2502 *.io \u2502 \u2502 *.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u25b2\n \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (RateLimitPolicy) \u2502\n\u2502 gw-rlp \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -n istio-system -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: gw-rlp\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: external\n limits:\n \"global\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
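To check that the gateway-wide policy was accepted while you wait (a sketch using the status condition checks from later in this document):
kubectl get ratelimitpolicy gw-rlp -n istio-system -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n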
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#4-deploy-a-sample-api-to-test-rate-limiting-enforced-at-the-level-of-the-gateway","title":"\u2463 Deploy a sample API to test rate limiting enforced at the level of the gateway","text":" \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 (Gateway) \u2502 \u2502 (Gateway) \u2502\n\u2502 (RateLimitPolicy) \u2502 \u2502 external \u2502 \u2502 internal \u2502\n\u2502 gw-rlp \u251c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 *.io \u2502 \u2502 *.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502 \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (HTTPRoute) \u2502\n \u2502 toystore \u2502\n \u2502 \u2502\n \u2502 *.toystore.io \u2502\n \u2502 *.toystore.local \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n \u2502 (Service) \u2502\n \u2502 toystore \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Deploy the sample API:
kubectl apply -f examples/toystore/toystore.yaml\n
Route traffic to the API from both gateways:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: external\n namespace: istio-system\n - name: internal\n namespace: istio-system\n hostnames:\n - \"*.toystore.io\"\n - \"*.toystore.local\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#5-verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"\u2464 Verify the rate limiting works by sending requests in a loop","text":"Expose the gateways, respectively at the port numbers 9081
and 9082
of the local host:
kubectl port-forward -n istio-system service/external-istio 9081:80 2>&1 >/dev/null &\nkubectl port-forward -n istio-system service/internal-istio 9082:80 2>&1 >/dev/null &\n
Up to 5 successful (200 OK
) requests every 10 seconds through the external
ingress gateway (*.io
), then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK
) requests through the internal
ingress gateway (*.local
):
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9082 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/","title":"Gateway TLS for Cluster Operators","text":"This user guide walks you through an example of how to configure TLS for all routes attached to an ingress gateway.
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#requisites","title":"Requisites","text":" - Docker
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#setup","title":"Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API, CertManager and Kuadrant itself.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Set up the environment:
make local-setup\n
Create a namespace:
kubectl create namespace my-gateways\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"Create a gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.toystore.local\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: toystore-local-tls\n kind: Secret\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#enable-tls-on-the-gateway","title":"Enable TLS on the gateway","text":"The TLSPolicy requires a reference to an existing CertManager Issuer.
Create a CertManager Issuer:
kubectl apply -n my-gateways -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: selfsigned-issuer\nspec:\n selfSigned: {}\nEOF\n
Note: We are using a self-signed issuer here but any supported CertManager issuer or cluster issuer can be used.
kubectl get issuer selfsigned-issuer -n my-gateways\n
Response: NAME READY AGE\nselfsigned-issuer True 18s\n
Create a Kuadrant TLSPolicy
to configure TLS:
kubectl apply -n my-gateways -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: prod-web\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: Issuer\n name: selfsigned-issuer\nEOF\n
Check policy status:
kubectl get tlspolicy -o wide -n my-gateways\n
Response: NAME STATUS TARGETREFKIND TARGETREFNAME AGE\nprod-web Accepted Gateway prod-web 13s\n
Check a Certificate resource was created:
kubectl get certificates -n my-gateways\n
Response NAME READY SECRET AGE\ntoystore-local-tls True toystore-local-tls 7m30s\n
Check a TLS Secret resource was created:
kubectl get secrets -n my-gateways --field-selector=\"type=kubernetes.io/tls\"\n
Response: NAME TYPE DATA AGE\ntoystore-local-tls kubernetes.io/tls 3 7m42s\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#deploy-a-sample-api-to-test-tls","title":"Deploy a sample API to test TLS","text":"Deploy the sample API:
kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n
Route traffic to the API from our gateway:
kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: prod-web\n namespace: my-gateways\n hostnames:\n - \"*.toystore.local\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#verify-tls-works-by-sending-requests","title":"Verify TLS works by sending requests","text":"Get the gateway address@
GWADDRESS=`kubectl get gateway/prod-web -n my-gateways -o=jsonpath='{.status.addresses[?(@.type==\"IPAddress\")].value}'`\necho $GWADDRESS\n
Response: 172.18.200.1\n
Verify we can access the service via TLS:
curl -vkI https://api.toystore.local --resolve \"api.toystore.local:443:$GWADDRESS\"\n
Response: * Added api.toystore.local:443:172.18.200.1 to DNS cache\n* Hostname api.toystore.local was found in DNS cache\n* Trying 172.18.200.1:443...\n* Connected to api.toystore.local (172.18.200.1) port 443 (#0)\n* ALPN: offers h2\n* ALPN: offers http/1.1\n* TLSv1.0 (OUT), TLS header, Certificate Status (22):\n* TLSv1.3 (OUT), TLS handshake, Client hello (1):\n* TLSv1.2 (IN), TLS header, Certificate Status (22):\n* TLSv1.3 (IN), TLS handshake, Server hello (2):\n* TLSv1.2 (IN), TLS header, Finished (20):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):\n* TLSv1.3 (IN), TLS handshake, Certificate (11):\n* TLSv1.3 (IN), TLS handshake, CERT verify (15):\n* TLSv1.3 (IN), TLS handshake, Finished (20):\n* TLSv1.2 (OUT), TLS header, Finished (20):\n* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.3 (OUT), TLS handshake, Finished (20):\n* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384\n* ALPN: server accepted h2\n* Server certificate:\n* subject: [NONE]\n* start date: Feb 15 11:46:50 2024 GMT\n* expire date: May 15 11:46:50 2024 GMT\n* Using HTTP2, server supports multiplexing\n* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* h2h3 [:method: HEAD]\n* h2h3 [:path: /]\n* h2h3 [:scheme: https]\n* h2h3 [:authority: api.toystore.local]\n* h2h3 [user-agent: curl/7.85.0]\n* h2h3 [accept: */*]\n* Using Stream ID: 1 (easy handle 0x5623e4fe5bf0)\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n> HEAD / HTTP/2\n> Host: api.toystore.local\n> user-agent: curl/7.85.0\n> accept: */*\n> \n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* old SSL session ID is stale, removing\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* Connection state changed (MAX_CONCURRENT_STREAMS == 2147483647)!\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n< HTTP/2 200 \nHTTP/2 200 \n< content-type: application/json\ncontent-type: application/json\n< server: istio-envoy\nserver: istio-envoy\n< date: Thu, 15 Feb 2024 12:13:27 GMT\ndate: Thu, 15 Feb 2024 12:13:27 GMT\n< content-length: 1658\ncontent-length: 1658\n< x-envoy-upstream-service-time: 1\nx-envoy-upstream-service-time: 1\n\n< \n\n* Connection #0 to host api.toystore.local left intact\n
"},{"location":"kuadrant-operator/doc/user-guides/gateway-tls/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/","title":"Secure, protect, and connect APIs with Kuadrant on OpenShift","text":""},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#overview","title":"Overview","text":"This guide walks you through using Kuadrant on OpenShift to secure, protect, and connect an API exposed by a Gateway that is based on Kubernetes Gateway API. You can use this walkthrough for a Gateway deployed on a single OpenShift cluster or a Gateway distributed across multiple OpenShift clusters with a shared listener hostname. This guide shows how the platform engineer and application developer user roles can each use Kuadrant to achieve their goals.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#what-kuadrant-can-do-for-you-in-a-multicluster-environment","title":"What Kuadrant can do for you in a multicluster environment","text":"You can leverage Kuadrant's capabilities in single or multiple clusters. The following features are designed to work across multiple clusters as well as in a single-cluster environment.
- Multicluster ingress: Kuadrant provides multicluster ingress connectivity using DNS to bring traffic to your Gateways by using a strategy defined in a
DNSPolicy
. - Global rate limiting: Kuadrant can enable global rate limiting use cases when configured to use a shared Redis store for counters based on limits defined by a
RateLimitPolicy
. - Global auth: You can configure a Kuadrant
AuthPolicy
to leverage external auth providers to ensure that different clusters exposing the same API authenticate and authorize in the same way. - Integration with federated metrics stores: Kuadrant has example dashboards and metrics for visualizing your Gateways and observing traffic hitting those Gateways across multiple clusters.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#user-roles","title":"User roles","text":" -
Platform engineer: This guide walks you through deploying a Gateway that provides secure communication and is protected and ready for use by application development teams to deploy an API. It then walks through using this Gateway in clusters in different geographic regions, leveraging Kuadrant to bring specific traffic to your geo-located Gateways to reduce latency and distribute load, while still being protected and secured with global rate limiting and auth.
-
Application developer: This guide walks through how you can use the Kuadrant OpenAPI Specification (OAS) extensions and kuadrantctl
CLI to generate an HTTPRoute
for your API and to add specific auth and rate limiting requirements.
As an optional extra, this guide highlights how both user roles can observe and monitor these Gateways when the OpenShift user workload monitoring and observability stack is deployed.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#deployment-management-tooling","title":"Deployment management tooling","text":"While this document uses kubectl
commands for simplicity, working with multiple clusters is complex, and it is best to use a tool such as Argo CD to manage the deployment of resources to multiple clusters.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites","title":"Prerequisites","text":"This guide expects that you have successfully installed Kuadrant on at least one OpenShift cluster:
- You have completed the steps in Install Kuadrant on an OpenShift cluster for one or more clusters.
- For multicluster scenarios, you have installed Kuadrant on at least two different OpenShift clusters, and have a shared accessible Redis store.
- You have the
kubectl
command line installed. - Optional: User workload monitoring is configured to remote write to a central storage system such as Thanos, as described in Install Kuadrant on an OpenShift cluster.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#platform-engineer-workflow","title":"Platform engineer workflow","text":"NOTE: You must perform the following steps in each cluster individually, unless specifically excluded.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-1-set-your-environment-variables","title":"Step 1 - Set your environment variables","text":"Set the following environment variables used for convenience in this guide:
export zid=change-this-to-your-zone-id\nexport rootDomain=example.com\nexport gatewayNS=api-gateway\nexport gatewayName=external\nexport devNS=toystore\nexport AWS_ACCESS_KEY_ID=xxxx\nexport AWS_SECRET_ACCESS_KEY=xxxx\nexport AWS_REGION=us-east-1\nexport clusterIssuerName=lets-encrypt\nexport EMAIL=foo@example.com\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-2-set-up-a-managed-dns-zone","title":"Step 2 - Set up a managed DNS zone","text":"The managed DNS zone declares a zone and credentials to access the zone that Kuadrant can use to set up DNS configuration.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#create-the-managedzone-resource","title":"Create the ManagedZone resource","text":"Apply the following ManagedZone
resource and AWS credentials to each cluster. Alternatively, if you are adding an additional cluster, add it to the new cluster:
kubectl create ns ${gatewayNS}\n
Create the zone credentials as follows:
kubectl -n ${gatewayNS} create secret generic aws-credentials \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
Then create a ManagedZone
as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: managedzone\n namespace: ${gatewayNS}\nspec:\n id: ${zid}\n domainName: ${rootDomain}\n description: \"Kuadrant managed zone\"\n dnsProviderSecretRef:\n name: aws-credentials\nEOF\n
Wait for the ManagedZone
to be ready in each cluster as follows:
kubectl wait managedzone/managedzone --for=condition=ready=true -n ${gatewayNS}\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-3-add-a-tls-issuer","title":"Step 3 - Add a TLS issuer","text":"To secure communication to the Gateways, you will define a TLS issuer for TLS certificates. This example uses Let's Encrypt, but you can use any issuer supported by cert-manager
.
The following example uses Let's Encrypt staging, which you must also apply to all clusters:
kubectl apply -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: ${clusterIssuerName}\nspec:\n acme:\n email: ${EMAIL} \n privateKeySecretRef:\n name: le-secret\n server: https://acme-staging-v02.api.letsencrypt.org/directory\n solvers:\n\n - dns01:\n route53:\n hostedZoneID: ${zid}\n region: ${AWS_REGION}\n accessKeyIDSecretRef:\n key: AWS_ACCESS_KEY_ID\n name: aws-credentials\n secretAccessKeySecretRef:\n key: AWS_SECRET_ACCESS_KEY\n name: aws-credentials\nEOF\n
Then wait for the ClusterIssuer
to become ready as follows:
kubectl wait clusterissuer/${clusterIssuerName} --for=condition=ready=true\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-4-set-up-a-gateway","title":"Step 4 - Set up a Gateway","text":"For Kuadrant to balance traffic using DNS across two or more clusters, you must define a Gateway with a shared host. You will define this by using an HTTPS listener with a wildcard hostname based on the root domain. As mentioned earlier, you must apply these resources to all clusters.
NOTE: For now, the Gateway is set to accept an HTTPRoute
from the same namespace only. This allows you to restrict who can use the Gateway until it is ready for general use.
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: ${gatewayName}\n namespace: ${gatewayNS}\n labels:\n kuadrant.io/gateway: \"true\"\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: Same\n hostname: \"*.${rootDomain}\"\n name: api\n port: 443\n protocol: HTTPS\n tls:\n certificateRefs:\n - group: \"\"\n kind: Secret\n name: api-${gatewayName}-tls\n mode: Terminate\nEOF\n
Check the status of your Gateway as follows:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\nkubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Programmed\")].message}'\n
Your Gateway should be accepted and programmed (valid and assigned an external address). However, if you check your listener status as follows, you will see that it is not yet programmed or ready to accept traffic due to bad TLS configuration:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
Kuadrant can help with this by using a TLSPolicy.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-5-secure-and-protect-the-gateway-with-auth-tls-rate-limit-and-dns-policies","title":"Step 5 - Secure and protect the Gateway with auth, TLS, rate limit, and DNS policies","text":"While your Gateway is now deployed, it has no exposed endpoints and your listener is not programmed. Next, you can set up a TLSPolicy
that leverages your CertificateIssuer to set up your listener certificates.
You will also define an AuthPolicy
that will set up a default 403
response for any unprotected endpoints, as well as a RateLimitPolicy
that will set up a default artificially low global limit to further protect any endpoints exposed by this Gateway.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-auth-policy","title":"Set the Auth policy","text":"Set a default, deny-all AuthPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: ${gatewayName}-auth\n namespace: ${gatewayNS}\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: ${gatewayName}\n defaults:\n rules:\n authorization:\n \"deny\":\n opa:\n rego: \"allow = false\"\nEOF\n
Check that your auth policy was accepted by the controller as follows:
kubectl get authpolicy ${gatewayName}-auth -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-tls-policy","title":"Set the TLS policy","text":"Set the TLSPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: ${gatewayName}-tls\n namespace: ${gatewayNS}\nspec:\n targetRef:\n name: ${gatewayName}\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: ${clusterIssuerName}\nEOF\n
Check that your TLS policy was accepted by the controller as follows:
kubectl get tlspolicy ${gatewayName}-tls -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-rate-limit-policy","title":"Set the rate limit policy","text":"Set the default RateLimitPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: ${gatewayName}-rlp\n namespace: ${gatewayNS}\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: ${gatewayName}\n defaults:\n limits:\n \"low-limit\":\n rates:\n\n - limit: 2\n duration: 10\n unit: second\nEOF\n
To check your rate limits have been accepted, enter the following command:
kubectl get ratelimitpolicy ${gatewayName}-rlp -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-the-dns-policy","title":"Set the DNS policy","text":"Set the DNSPolicy
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: ${gatewayName}-dnspolicy\n namespace: ${gatewayNS}\nspec:\n routingStrategy: loadbalanced\n loadBalancing:\n geo: \n defaultGeo: US \n weighted:\n defaultWeight: 120 \n targetRef:\n name: ${gatewayName}\n group: gateway.networking.k8s.io\n kind: Gateway\nEOF\n
NOTE: The DNSPolicy
will leverage the ManagedZone
that you defined earlier based on the listener hosts defined in the Gateway.
Check that your DNSPolicy
has been accepted as follows:
kubectl get dnspolicy ${gatewayName}-dnspolicy -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#create-an-http-route","title":"Create an HTTP route","text":"Create an HTTPRoute
for your Gateway as follows:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: test\n namespace: ${gatewayNS}\nspec:\n parentRefs:\n\n - name: ${gatewayName}\n namespace: ${gatewayNS}\n hostnames:\n - \"test.${rootDomain}\"\n rules:\n - backendRefs:\n - name: toystore\n port: 80\nEOF\n
Check your Gateway policies are enforced as follows:
kubectl get dnspolicy ${gatewayName}-dnspolicy -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\nkubectl get authpolicy ${gatewayName}-auth -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\nkubectl get ratelimitpolicy ${gatewayName}-rlp -n ${gatewayNS} -o=jsonpath='{.status.conditions[?(@.type==\"Enforced\")].message}'\n
Check your listener is ready as follows:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
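Optionally, once the DNSPolicy reports as enforced, you can verify that the DNS record resolves. This is a quick sanity check, assuming your ManagedZone is live and the record has had a few minutes to propagate:
dig +short test.${rootDomain}\n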
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-6-test-connectivity-and-deny-all-auth","title":"Step 6 - Test connectivity and deny all auth","text":"You can use curl
to hit your endpoint. You should see a 403
. Because this example uses the Let's Encrypt staging environment, whose CA is not trusted by default, you can pass the -k
flag:
curl -k -w \"%{http_code}\" https://$(kubectl get httproute test -n ${gatewayNS} -o=jsonpath='{.spec.hostnames[0]}')\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-7-opening-up-the-gateway-for-other-namespaces","title":"Step 7 - Opening up the Gateway for other namespaces","text":"Because you have configured the Gateway, secured it with Kuadrant policies, and tested it, you can now open it up for use by other teams in other namespaces:
kubectl patch gateway ${gatewayName} -n ${gatewayNS} --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/listeners/0/allowedRoutes/namespaces/from\", \"value\":\"All\"}]'\n
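You can verify the patch as follows; the listener should now report All for its allowed route namespaces:
kubectl get gateway ${gatewayName} -n ${gatewayNS} -o=jsonpath='{.spec.listeners[0].allowedRoutes.namespaces.from}'\n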
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-8-extending-this-gateway-to-multiple-clusters-and-configuring-geo-based-routing","title":"Step 8 - Extending this Gateway to multiple clusters and configuring geo-based routing","text":"To distribute this Gateway across multiple clusters, repeat this setup process for each cluster. By default, this will implement a round-robin DNS strategy to distribute traffic evenly across the different clusters. Setting up your Gateways to serve clients based on their geographic location is straightforward with your current configuration.
Assuming that you have deployed Gateway instances across multiple clusters as per this guide, the next step involves updating the DNS controller with the geographic regions of the visible Gateways.
For instance, if you have one cluster in North America and another in the EU, you can direct traffic to these Gateways based on their location by applying the appropriate labels:
For your North American cluster, enter the following command:
kubectl label --overwrite gateway ${gatewayName} kuadrant.io/lb-attribute-geo-code=US -n ${gatewayNS}\n
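Similarly, for your EU cluster, apply the corresponding geo code label (assuming the same label convention with an EU value):
kubectl label --overwrite gateway ${gatewayName} kuadrant.io/lb-attribute-geo-code=EU -n ${gatewayNS}\n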
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#application-developer-workflow","title":"Application developer workflow","text":"This section of the walkthrough focuses on using an OpenAPI Specification (OAS) to define an API. You will use Kuadrant OAS extensions to specify the routing, authentication, and rate limiting requirements. Next, you will use the kuadrantctl
tool to generate an AuthPolicy
, an HTTPRoute
, and a RateLimitPolicy
, which you will then apply to your cluster to enforce the settings defined in your OAS.
NOTE: While this section uses the kuadrantctl
tool, this is not essential. You can also create and apply an AuthPolicy
, RateLimitPolicy
, and HTTPRoute
by using the oc
or kubectl
commands.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites_1","title":"Prerequisites","text":" - You have installed
kuadrantctl
. You can find a compatible binary and download it from the kuadrantctl releases page. - You have the ability to distribute resources generated by
kuadrantctl
to multiple clusters, as though you are a platform engineer.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-1-deploy-the-toystore-app","title":"Step 1 - Deploy the toystore app","text":"To begin, deploy a new version of the toystore
app to a developer namespace as follows:
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/Kuadrant-operator/main/examples/toystore/toystore.yaml -n ${devNS}\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-2-set-up-httproute-and-backend","title":"Step 2 - Set up HTTPRoute and backend","text":"Copy at least one of the following example OAS to a local location:
-
Sample OAS for rate limiting with API key
-
Sample OAS for rate limiting with OIDC
Set up some new environment variables as follows:
export oasPath=examples/oas-apikey.yaml\n# Ensure you still have these environment variables setup from the start of this guide:\nexport rootDomain=example.com\nexport gatewayNS=api-gateway\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-3-use-oas-to-define-your-httproute-rules","title":"Step 3 - Use OAS to define your HTTPRoute rules","text":"You can generate Kuadrant and Gateway API resources directly from OAS documents by using an x-kuadrant
extension.
NOTE: For a more in-depth look at the OAS extension, see the kuadrantctl documentation.
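For orientation, a root-level x-kuadrant block in an OAS document might look like the hedged sketch below. The field values here are illustrative placeholders only; the sample OAS files above and the kuadrantctl documentation contain the authoritative structure:
x-kuadrant:\n route:\n name: toystore\n namespace: ${devNS}\n hostnames:\n - \"api.${rootDomain}\"\n parentRefs:\n - name: ${gatewayName}\n namespace: ${gatewayNS}\n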
You will use kuadrantctl
to generate your HTTPRoute
.
NOTE: The sample OAS has some placeholders for namespaces and domains. You will inject valid values into these placeholders based on your previous environment variables.
Generate the resource from your OAS as follows (envsubst
will replace the placeholders):
cat $oasPath | envsubst | kuadrantctl generate gatewayapi httproute --oas - | kubectl apply -f -\n
kubectl get httproute toystore -n ${devNS} -o=yaml\n
You should see that this route is affected by the AuthPolicy
and RateLimitPolicy
defined as defaults on the Gateway in the Gateway namespace.
- lastTransitionTime: \"2024-04-26T13:37:43Z\"\n message: Object affected by AuthPolicy demo/external\n observedGeneration: 2\n reason: Accepted\n status: \"True\"\n type: kuadrant.io/AuthPolicyAffected\n- lastTransitionTime: \"2024-04-26T14:07:28Z\"\n message: Object affected by RateLimitPolicy demo/external\n observedGeneration: 1\n reason: Accepted\n status: \"True\"\n type: kuadrant.io/RateLimitPolicyAffected \n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-4-test-connectivity-and-deny-all-auth","title":"Step 4 - Test connectivity and deny-all auth","text":"You can use curl
to hit an endpoint in the toystore app. Because you are using Let's Encrypt staging in this example, you can pass the -k
flag as follows:
curl -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You are getting a 403
because of the existing default, deny-all AuthPolicy
applied at the Gateway. You can override this for your HTTPRoute
.
Choose one of the following options:
- API key auth flow
- OpenID Connect auth flow
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-5-set-up-api-key-auth-flow","title":"Step 5 - Set up API key auth flow","text":"Set up an example API key in each cluster as follows:
kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: toystore-api-key\n namespace: ${devNS}\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: api_key\nstringData:\n api_key: secret\ntype: Opaque\nEOF\n
Next, generate an AuthPolicy
that uses secrets in your cluster as API keys as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas -\n
From this, you can see an AuthPolicy
generated based on your OAS that will look for API keys in secrets labeled api_key
and look for that key in the header api_key
. You can now apply this to the Gateway as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | kubectl apply -f -\n
You should get a 200
from the following GET
because it has no auth requirement:
curl -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should get a 401 for the following POST request, because this endpoint does have an auth requirement and the request does not include an API key:
curl -XPOST -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
Finally, if you add your API key header with a valid key, as follows, you should get a 200 response:
curl -XPOST -H 'api_key: secret' -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#optional-step-6-set-up-openid-connect-auth-flow-skip-if-using-api-key-only","title":"Optional: Step 6 - Set up OpenID Connect auth flow (skip if using API key only)","text":"This section of the walkthrough uses the kuadrantctl
tool to create an AuthPolicy
that integrates with an OpenID provider and a RateLimitPolicy
that leverages JWT values for per-user rate limiting. It is important to note that OpenID requires an external provider. Therefore, you should adapt the following example to suit your specific needs and provider.
The platform engineer workflow established default policies for authentication and rate limiting at your Gateway. The new developer-defined policies, which you will create, are intended to target your HTTPRoute and will supersede the existing policies for requests to your API endpoints, similar to your previous API key example.
The example OAS uses Kuadrant-based extensions. These extensions enable you to define routing and service protection requirements. For more details, see OpenAPI Kuadrant extensions.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#prerequisites_2","title":"Prerequisites","text":" - You have installed and configured an OpenID Connect provider, such as https://www.keycloak.org/.
- You have a realm, client, and users set up. This example assumes a realm in a Keycloak instance called
toystore
. - Copy the OAS from sample OAS for rate-limiting and OIDC to a local location.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#set-up-an-openid-authpolicy","title":"Set up an OpenID AuthPolicy","text":"Set the following environment variables:
export openIDHost=some.keycloak.com\nexport oasPath=examples/oas-oidc.yaml\n
NOTE: The sample OAS has some placeholders for namespaces and domains. You will inject valid values into these placeholders based on your previous environment variables.
You can use your OAS and kuadrantctl
to generate an AuthPolicy
to replace the default on the Gateway as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas -\n
If you are happy with the generated resource, you can apply it to the cluster as follows:
cat $oasPath | envsubst | kuadrantctl generate kuadrant authpolicy --oas - | kubectl apply -f -\n
You should see in the status of the AuthPolicy
that it has been accepted and enforced:
kubectl get authpolicy -n ${devNS} toystore -o=jsonpath='{.status.conditions}'\n
On your HTTPRoute
, you should also see it now affected by this AuthPolicy
in the toystore namespace:
kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/AuthPolicyAffected\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#test-your-openid-authpolicy","title":"Test your OpenID AuthPolicy","text":"You can test your AuthPolicy
as follows:
export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=toystore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${openIDHost}/auth/realms/toystore/protocol/openid-connect/token\" | jq -r '.access_token')\n
curl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should see a 401
response code. Make a request with a valid bearer token as follows:
curl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\n
You should see a 200
response code.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#step-7-set-up-rate-limiting","title":"Step 7 - Set up rate limiting","text":"Lastly, you can generate your RateLimitPolicy
to add your rate limits, based on your OAS file. Rate limiting is simplified for this walkthrough and is based on either the bearer token or the API key value. There are more advanced examples in the How-to guides on the Kuadrant documentation site, for example: Authenticated rate limiting with JWTs and Kubernetes RBAC.
You can continue to use this sample OAS document, which includes both authentication and a rate limit:
export oasPath=examples/oas-oidc.yaml\n
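Generate and apply the RateLimitPolicy from your OAS, following the same pattern as the AuthPolicy above (this assumes the equivalent ratelimitpolicy subcommand of kuadrantctl):
cat $oasPath | envsubst | kuadrantctl generate kuadrant ratelimitpolicy --oas - | kubectl apply -f -\n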
Again, you should see the rate limit policy accepted and enforced:
kubectl get ratelimitpolicy -n ${devNS} toystore -o=jsonpath='{.status.conditions}'\n
On your HTTPRoute
, you should now see it is affected by the RateLimitPolicy
in the same namespace:
kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/RateLimitPolicyAffected\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#test-your-ratelimitpolicy","title":"Test your RateLimitPolicy","text":"You can now test your rate limiting as follows:
NOTE: You might need to wait a minute for the new rate limits to be applied. With the following requests, you should see a number of 429 responses.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#api-key-auth","title":"API Key auth","text":"for i in {1..3}\ndo\nprintf \"request $i \"\ncurl -XPOST -H 'api_key:secret' -s -k -o /dev/null -w \"%{http_code}\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\nprintf \"\\n -- \\n\"\ndone \n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#openid-connect-auth","title":"OpenID Connect auth","text":"export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=toystore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${openIDHost}/auth/realms/toystore/protocol/openid-connect/token\" | jq -r '.access_token')\n
for i in {1..3}\ndo\ncurl -k -XPOST --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" \"https://$(kubectl get httproute toystore -n ${devNS} -o=jsonpath='{.spec.hostnames[0]}')/v1/toys\"\ndone\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/#conclusion","title":"Conclusion","text":"You have completed the secure, protect, and connect walkthrough. To learn more about Kuadrant, visit https://docs.kuadrant.io.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/","title":"Secure, protect, and connect services with Kuadrant on Kubernetes","text":""},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#prerequisites","title":"Prerequisites","text":" - You have completed the Single-cluster Quick Start or Multi-cluster Quick Start.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#overview","title":"Overview","text":"In this guide, we will cover the different policies from Kuadrant and how you can use them to secure, protect and connect an Istio-controlled gateway in a single cluster, and how you can set more refined protection on the HTTPRoutes exposed by that gateway.
Here are the steps we will go through:
1) Deploy a sample application
2) Define a new Gateway
3) Ensure TLS-based secure connectivity to the gateway with a TLSPolicy
4) Define a default RateLimitPolicy to set some infrastructure limits on your gateway
5) Define a default AuthPolicy to deny all access to the gateway
6) Define a DNSPolicy to bring traffic to the gateway
7) Override the Gateway's deny-all AuthPolicy with an endpoint-specific policy
8) Override the Gateway rate limits with an endpoint-specific policy
You will need to set the KUBECTL_CONTEXT
environment variable for the kubectl context of the cluster you are targeting. If you have followed the single cluster setup, it should be something like below. Adjust the name of the cluster accordingly if you have followed the multi cluster setup.
# Typical single cluster context\nexport KUBECTL_CONTEXT=kind-kuadrant-local\n\n# Example context for additional 'multi cluster' clusters\n# export KUBECTL_CONTEXT=kind-kuadrant-local-1\n
To help with this walkthrough, you should also set a KUADRANT_ZONE_ROOT_DOMAIN
environment variable to a domain you want to use. If you want to try DNSPolicy, this should also be a domain you have access to the DNS for in AWS Route53 or GCP. E.g.:
export KUADRANT_ZONE_ROOT_DOMAIN=my.domain.iown\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#deploy-the-example-app-we-will-serve-via-our-gateway","title":"\u2776 Deploy the example app we will serve via our gateway","text":"kubectl --context $KUBECTL_CONTEXT apply -f https://raw.githubusercontent.com/Kuadrant/kuadrant-operator/main/examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-a-new-istio-managed-gateway","title":"\u2777 Define a new Istio-managed gateway","text":"kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: api-gateway\n namespace: kuadrant-system\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$KUADRANT_ZONE_ROOT_DOMAIN\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: apps-hcpapps-tls\n kind: Secret\nEOF\n
If you take a look at the gateway status, you will see a TLS status error similar to the following:
message: invalid certificate reference /Secret/apps-hcpapps-tls. secret kuadrant-system/apps-hcpapps-tls not found\n
This is because there is currently no TLS secret in place. Let's fix that by creating a TLSPolicy.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-tlspolicy","title":"\u2778 Define the TLSPolicy","text":"Note: For convenience, in the setup, we have created a self-signed CA as a cluster issuer in the Kubernetes cluster.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: api-gateway-tls\n namespace: kuadrant-system\nspec:\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: kuadrant-operator-glbc-ca\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait tlspolicy api-gateway-tls -n kuadrant-system --for=condition=accepted\n
Now, if you look at the status of the gateway, you will see the error is gone, and the status of the policy will report the listener as now secured with a TLS certificate and the gateway as affected by the TLS policy.
Our communication with our gateway is now secured via TLS. Note that any new listeners will also be handled by the TLSPolicy.
Let's define an HTTPRoute and test our policy. We will re-use it later on with some of the other policies as well.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1beta1\nkind: HTTPRoute\nmetadata:\n name: toystore\n labels:\n deployment: toystore\n service: toystore\nspec:\n parentRefs:\n\n - name: api-gateway\n namespace: kuadrant-system\n hostnames:\n - \"api.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/cars\"\n - method: GET\n path:\n type: PathPrefix\n value: \"/dolls\"\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/admin\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
With this HTTPRoute in place, the service we deployed is exposed via the gateway. We should be able to access our endpoint via HTTPS:
export INGRESS_HOST=$(kubectl --context $KUBECTL_CONTEXT get gtw api-gateway -o jsonpath='{.status.addresses[0].value}' -n kuadrant-system)\n\ncurl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-infrastructure-rate-limiting","title":"\u2779 Define Infrastructure Rate Limiting","text":"We have a secure communication in place. However, there is nothing limiting users from overloading our infrastructure and service components that will sit behind this gateway. Let's add a rate limiting layer to protect our services and infrastructure.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: infra-ratelimit\n namespace: kuadrant-system\nspec:\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\n limits:\n \"global\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait ratelimitpolicy infra-ratelimit -n kuadrant-system --for=condition=accepted\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
The limit here is artificially low in order for us to show it working easily. Let's test it with our endpoint:
for i in {1..10}; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" && sleep 1; done\n
We should see 429 Too Many Requests responses start returning after the 5th request.
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-gateway-authpolicy","title":"\u277a Define the Gateway AuthPolicy","text":"Communication is secured and we have some protection for our infrastructure, but we do not trust any client to access our endpoints. By default, we want to allow only authenticated access. To protect our gateway, we will add a deny-all AuthPolicy. Later, we will override this with a more specific AuthPolicy for the API.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: deny-all\n namespace: kuadrant-system\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: api-gateway\n rules:\n authorization:\n deny-all:\n opa:\n rego: \"allow = false\"\n response:\n unauthorized:\n headers:\n \"content-type\":\n value: application/json\n body:\n value: |\n {\n \"error\": \"Forbidden\",\n \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n }\nEOF\n
Let's test it again. This time we expect a 403 Forbidden
.
curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\"\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#define-the-dnspolicy","title":"\u277b Define the DNSPolicy","text":"(Skip this step if you did not configure a DNS provider during the setup.)
Now, we have our gateway protected and communications secured. We are ready to configure DNS, so it is easy for clients to connect and access the APIs we intend to expose via this gateway. Note that during the setup of this walkthrough, we created a DNS Provider secret and a ManagedZone resource.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: simple-dnspolicy\n namespace: kuadrant-system\nspec:\n routingStrategy: simple\n targetRef:\n name: api-gateway\n group: gateway.networking.k8s.io\n kind: Gateway\nEOF\n\nkubectl --context $KUBECTL_CONTEXT wait dnspolicy simple-dnspolicy -n kuadrant-system --for=condition=enforced\n
If you want to see the DNSRecord created by this policy, execute the following command:
kubectl --context $KUBECTL_CONTEXT get dnsrecord.kuadrant.io api-gateway-api -n kuadrant-system -o=yaml\n
So now we have a wildcard DNS record to bring traffic to our gateway.
Let's test it again. This time we expect a 403
still as the deny-all policy is still in effect. Notice we no longer need to set the Host header directly.
Note: If you have followed through this guide on more than 1 cluster, the DNS record for the HTTPRoute hostname will have multiple IP addresses. This means that requests will be made in a round robin pattern across clusters as your DNS provider sends different responses to lookups. You may need to send multiple requests before one hits the cluster you are currently configuring.
curl -k \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" -i\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#override-the-gateways-deny-all-authpolicy","title":"\u277c Override the Gateway's deny-all AuthPolicy","text":"Next, we are going to allow authenticated access to our Toystore API. To do this, we will define an AuthPolicy that targets the HTTPRoute. Note that any new HTTPRoutes will still be affected by the gateway-level policy, but as we want users to now access this API, we need to override that policy. For simplicity, we will use API keys to authenticate the requests, though many other options are available.
Let's define an API Key for users bob and alice.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: bob-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: bob\nstringData:\n api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: alice-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: toystore\n annotations:\n secret.kuadrant.io/user-id: alice\nstringData:\n api_key: IAMALICE\ntype: Opaque\nEOF\n
Now, we will override the AuthPolicy to start accepting the API keys:
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n rules:\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels:\n app: toystore\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"identity\":\n json:\n properties:\n \"userid\":\n selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/secure-protect-connect/#override-the-gateways-ratelimitpolicy","title":"\u277d Override the Gateway's RateLimitPolicy","text":"The gateway limits are a good set of limits for the general case, but as the developers of this API we know that we only want to allow a certain number of requests to specific users, and a general limit for all other users.
kubectl --context $KUBECTL_CONTEXT apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"general-user\":\n rates:\n\n - limit: 1\n duration: 3\n unit: second\n counters:\n - metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: neq\n value: bob\n \"bob-limit\":\n rates:\n - limit: 2\n duration: 3\n unit: second\n when:\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: bob\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
In this example, we have given bob twice as many requests to use as everyone else.
Let's test this new setup.
By sending requests as alice:
while :; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
By sending requests as bob:
while :; do curl -k --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST} --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Note: If you configured a DNS provider during the setup and defined the DNSPolicy as described in one of the previous chapters you can omit the --resolve api.${KUADRANT_ZONE_ROOT_DOMAIN}:443:${INGRESS_HOST}
flag.
Note: If you have followed through this guide on more than 1 cluster, the DNS record for the HTTPRoute hostname will have multiple IP addresses. This means that requests will be made in a round robin pattern across clusters as your DNS provider sends different responses to lookups.
while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/","title":"Simple Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.
In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.toystore.com
, where it exposes the endpoints GET /toys*
and POST /toys
, respectively, to mimic the operations of reading and writing toy records.
We will rate limit the POST /toys
endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#run-the-steps-1-3","title":"Run the steps \u2460 \u2192 \u2462","text":""},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#1-setup","title":"\u2460 Setup","text":"This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API and Kuadrant itself.
Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.
Clone the project:
git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n
Setup the environment:
make local-setup\n
Request an instance of Kuadrant:
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant\nspec: {}\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#2-deploy-the-toy-store-api","title":"\u2461 Deploy the Toy Store API","text":"Create the deployment:
kubectl apply -f examples/toystore/toystore.yaml\n
Create an HTTPRoute to route traffic to the service via Istio Ingress Gateway:
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - api.toystore.com\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\n - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\n
Export the gateway hostname and port:
export INGRESS_HOST=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw istio-ingressgateway -n istio-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
Verify the route works:
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:
kubectl port-forward -n istio-system service/istio-ingressgateway-istio 9080:80 2>&1 >/dev/null &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#3-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2462 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"create-toy\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n routeSelectors:\n - matches: # selects the 2nd HTTPRouteRule of the targeted route\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\nEOF\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
Verify the rate limiting works by sending requests in a loop.
Up to 5 successful (200 OK
) requests every 10 seconds to POST /toys
, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -X POST | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK
) to GET /toys
:
while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/#cleanup","title":"Cleanup","text":"make local-cleanup\n
"},{"location":"kuadrant-operator/examples/alerts/","title":"Index","text":""},{"location":"kuadrant-operator/examples/alerts/#slo-multi-burn-rate-multi-window-alerts","title":"SLO Multi burn rate multi window alerts","text":"Kuadrant have created two example SLO alerts to help give ideas on the types of SLO alerts that could be used with the operator. We have created one alert for latency and one for availability, both are Multiwindow, Multi-Burn-Rate Alerts. The alerts show a scenario where a 28d rolling window is used and a uptime of 99.95% i.e only 0.05% error budget margin is desired. This in real world time would be downtime of around:
Time frame and allowed downtime: Daily: 43s; Weekly: 5m 2.4s; Monthly: 21m 44s; Quarterly: 1h 5m 12s; Yearly: 4h 20m 49s. These values can be changed to suit different scenarios.
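As a sanity check, each duration above is simply the window length multiplied by the error budget (1 - 0.9995 = 0.0005). A quick sketch with bc:
# Daily error budget: 86400 seconds x (1 - 0.9995)\necho \"86400 * 0.0005\" | bc # ~43.2 seconds\n# Weekly: 604800 seconds x 0.0005\necho \"604800 * 0.0005\" | bc # ~302.4 seconds = 5m 2.4s\n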
"},{"location":"kuadrant-operator/examples/alerts/#sloth","title":"Sloth","text":"Sloth is a tool to aid in the creation of multi burn rate and multi window SLO alerts and was used to create both the availability and latency alerts. It follows the common standard set out by Google's SRE book. Sloth generates alerts based on specific specs given. The specs for our example alerts can be found in the example/sloth folder.
"},{"location":"kuadrant-operator/examples/alerts/#metrics-used-for-the-alerts","title":"Metrics used for the alerts","text":""},{"location":"kuadrant-operator/examples/alerts/#availability","title":"Availability","text":"For the availability SLO alerts the Istio metric istio_requests_total
was used, as it is a counter-type metric, meaning its values can only increase, and it provides information on all requests handled by the Istio proxy.
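For orientation, a minimal Sloth spec for such an availability SLI might look like the sketch below. The service and alert names here are illustrative only; the authoritative specs are in the examples/alerts/sloth folder:
version: \"prometheus/v1\"\nservice: \"toystore\"\nslos:\n - name: \"requests-availability\"\n objective: 99.95\n sli:\n events:\n error_query: sum(rate(istio_requests_total{response_code=~\"5..\"}[{{.window}}]))\n total_query: sum(rate(istio_requests_total[{{.window}}]))\n alerting:\n name: ToystoreAvailabilityAlert\n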
"},{"location":"kuadrant-operator/examples/alerts/#latency","title":"Latency","text":"For the availability SLO alerts the Istio metric istio_request_duration_milliseconds
was used as its a Histogram.
"},{"location":"kuadrant-operator/examples/alerts/#sloth-generation","title":"Sloth generation","text":"You can modify the examples Sloth specs we have and regenerate the prometheus rules using the Sloth CLI and the generate command. For more information please the Sloth website
sloth generate -i examples/alerts/sloth/latency.yaml --default-slo-period=28d\n
You can also use the make target to generate the rules: make sloth-generate\n
"},{"location":"kuadrant-operator/examples/alerts/#prometheus-unit-tests","title":"Prometheus unit tests","text":"There are also two matching unit tests to verify and test the alerts that Sloth has generated. These can be run using the make target:
make alerts-tests\n
"},{"location":"authorino/","title":"Authorino","text":"Kubernetes-native authorization service for tailor-made Zero Trust API security.
A lightweight Envoy external authorization server fully manageable via Kubernetes Custom Resources. JWT authentication, API key, mTLS, pattern-matching authz, OPA, K8s SA tokens, K8s RBAC, external metadata fetching, and more, with minimum to no coding at all, no rebuilding of your applications.
Authorino is not about inventing anything new. It's about making the best things about auth out there easy and simple to use. Authorino is multi-tenant, it's cloud-native and it's open source.
"},{"location":"authorino/#getting-started","title":"Getting started","text":" - Deploy with the Authorino Operator
- Setup Envoy proxy and the external authorization filter
- Apply an Authorino
AuthConfig
custom resource - Obtain an authentication token and start sending requests
The full Getting started page of the docs provides details for the steps above, as well as information about requirements and next steps.
Or try out our Hello World example.
For general information about protecting your service using Authorino, check out the docs.
"},{"location":"authorino/#use-cases","title":"Use-cases","text":"The User guides section of the docs gathers several AuthN/AuthZ use-cases as well as the instructions to implement them using Authorino. A few examples are:
- Authentication with JWTs and OpenID Connect Discovery
- Authentication with API keys
- Authentication with Kubernetes SA tokens (TokenReview API)
- Authentication with X.509 certificates and mTLS
- Authorization with JSON pattern-matching rules (e.g. JWT claims, request attributes, etc)
- Authorization with Open Policy Agent (OPA) Rego policies
- Authorization using the Kubernetes RBAC (rules stated in K8s
Role
and RoleBinding
resources) - Authorization using auth metadata fetched from external sources
- OIDC authentication and RBAC with Keycloak JWTs
- Injecting auth data into the request (HTTP headers, Wristband tokens, rate-limit metadata, etc)
- Authorino for the Kubernetes control plane (aka Authorino as ValidatingWebhook service)
"},{"location":"authorino/#how-it-works","title":"How it works","text":"Authorino enables hybrid API security, with usually no code changes required to your application, tailor-made for your own combination of authentication standards and protocols and authorization policies of choice.
Authorino implements Envoy Proxy's external authorization gRPC protocol, and is a part of Red Hat Kuadrant architecture.
Under the hood, Authorino is based on Kubernetes Custom Resource Definitions and the Operator pattern.
Bootstrap and configuration:
- Deploy the service/API to be protected (\"Upstream\"), Authorino and Envoy
- Write and apply an Authorino
AuthConfig
Custom Resource associated to the public host of the service
Request-time:
- A user or service account (\"Consumer\") obtains an access token to consume resources of the Upstream service, and sends a request to the Envoy ingress endpoint
- The Envoy proxy establishes a fast gRPC connection with Authorino carrying data of the HTTP request (context info), which causes Authorino to look up an
AuthConfig
Custom Resource to enforce (pre-cached) - Identity verification (authentication) phase - Authorino verifies the identity of the consumer, where at least one authentication method/identity provider must go through
- External metadata phase - Authorino fetches additional metadata for the authorization from external sources (optional)
- Policy enforcement (authorization) phase - Authorino takes as input a JSON composed out of context data, resolved identity object and fetched additional metadata from previous phases, and triggers the evaluation of user-defined authorization policies
- Response (metadata-out) phase \u2013 Authorino builds user-defined custom responses (dynamic JSON objects and/or Festival Wristband OIDC tokens), to be supplied back to the client and/or upstream service within added HTTP headers or as Envoy Dynamic Metadata (optional)
- Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints (optional)
- Authorino and Envoy settle the authorization protocol with either OK/NOK response
- If authorized, Envoy triggers other HTTP filters in the chain (if any), pre-injecting eventual dynamic metadata returned by Authorino, and ultimately redirects the request to the Upstream
- The Upstream serves the requested resource to the consumer
The Architecture section of the docs covers details of protecting your APIs with Envoy and Authorino, including information about topology (centralized gateway, centralized authorization service or sidecars), deployment modes (cluster-wide reconciliation vs. namespaced instances), a specification of Authorino's AuthConfig
Custom Resource Definition (CRD) and more.
You will also find in that section information about what happens in request-time (aka Authorino's Auth Pipeline) and how to leverage the Authorization JSON for writing policies, dynamic responses and other features of Authorino.
"},{"location":"authorino/#list-of-features","title":"List of features","text":"Feature Stage Identity verification & authentication JOSE/JWT validation (OpenID Connect) Ready OAuth 2.0 Token Introspection (opaque tokens) Ready Kubernetes TokenReview (SA tokens) Ready OpenShift User-echo endpoint In analysis API key authentication Ready mTLS authentication Ready HMAC authentication Planned (#9) Plain (resolved beforehand and injected in the payload) Ready Anonymous access Ready Ad hoc external metadata fetching OpenID Connect User Info Ready UMA-protected resource attributes Ready HTTP GET/GET-by-POST Ready Policy enforcement/authorization JSON pattern matching (e.g. JWT claims, request attributes checking) Ready OPA/Rego policies (inline and pull from registry) Ready Kubernetes SubjectAccessReview (resource and non-resource attributes) Ready Authzed/SpiceDB Ready Keycloak Authorization Services (UMA-compliant Authorization API) In analysis Custom responses Festival Wristbands tokens (token normalization, Edge Authentication Architecture) Ready JSON injection (header injection, Envoy Dynamic Metadata) Ready Plain text value (header injection) Ready Custom response status code/messages (e.g. redirect) Ready Callbacks HTTP endpoints Ready Caching OpenID Connect and User-Managed Access configs Ready JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS) Ready Access tokens Ready External metadata Ready Precompiled Rego policies Ready Policy evaluation Ready Sharding (lookup performance, multitenancy) Ready For a detailed description of the features above, refer to the Features page.
"},{"location":"authorino/#faq","title":"FAQ","text":"Do I need to deploy Envoy? Authorino is built from the ground up to work well with Envoy. It is strongly recommended that you leverage Envoy along side Authorino. That said, it is possible to use Authorino without Envoy.
Authorino implements Envoy's external authorization gRPC protocol and therefore will accept any client request that complies.
Authorino also provides a second interface for raw HTTP authorization, suitable for using with Kubernetes ValidatingWebhook and other integrations (e.g. other proxies).
The only attribute of the authorization request that is strictly required is the host name. (See Host lookup for more information.) The other attributes, such as method, path, headers, etc, might as well be required, depending on each AuthConfig
. In the case of the gRPC CheckRequest
method, the host is supplied in Attributes.Request.Http.Host
and alternatively in Attributes.ContextExtensions[\"host\"]
. For raw HTTP authorization requests, the host must be supplied in Host
HTTP header.
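For illustration, a raw HTTP authorization request can be as simple as the hedged sketch below, where the service address placeholder, port 5001 and the /check path are assumptions based on the defaults mentioned in the Authorino docs; adjust to your deployment:
curl -X POST 'http://<authorino-service>:5001/check' -H 'Host: talker-api.127.0.0.1.nip.io' -d '{}'\n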
Check out Kuadrant for easy-to-use Envoy and Authorino deployment & configuration for API management use-cases, using Kubernetes Custom Resources.
Is Authorino an Identity Provider (IdP)? No, Authorino is not an Identity Provider (IdP). Nor is it an auth server of any kind, such as an OAuth2 server, an OpenID Connect (OIDC) server, or a Single Sign-On (SSO) server.
Authorino is not an identity broker either. It can verify access tokens from multiple trusted sources of identity and protocols, but it will not negotiate authentication flows for non-authenticated access requests. Some tricks nonetheless can be done, for example, to redirect unauthenticated users to a login page.
For an excellent auth server that checks all the boxes above, check out Keycloak.
How does Authorino compare to Keycloak? Keycloak is a proper auth server and identity provider (IdP). It offers a huge set of features for managing identities, identity sources with multiple user federation options, and a platform for authentication and authorization services.
Keycloak exposes authenticators that implement protocols such as OpenID Connect. This is a one-time flow that establishes the delegation of power to a client, for a short period of time. To be consistent with Zero Trust security, you want a validator to verify the short-lived tokens in every request that tries to reach your protected service/resource. This step, which repeats for every request, should avoid heavy lookups into big tables of tokens and instead leverage cached authorization policies for fast in-memory evaluation. This is where Authorino comes in.
Authorino verifies and validates Keycloak-issued ID tokens. OpenID Connect Discovery is used to request and cache JSON Web Key Sets (JWKS), used to verify the signature of the tokens without having to contact again with the Keycloak server, or looking in a table of credentials. Moreover, user long-lived credentials are safe, rather than spread in hops across the network.
You can also use Keycloak for storing auth-relevant resource metadata. These can be fetched by Authorino in request-time, to be combined into your authorization policies. See Keycloak Authorization Services and User-Managed Access (UMA) support, as well as Authorino UMA external metadata counter-part.
Why doesn't Authorino handle OAuth flows? It has to do with trust. OAuth grants are supposed to be negotiated directly between whoever owns the long-lived credentials in one hand (user, service accounts), and the trustworthy auth server that receives those credentials \u2013 ideally with minimum number of hops in the middle \u2013 and exchanges them for short-lived access tokens, on the other end.
There are use-cases for Authorino running in the edge (e.g. Edge Authentication Architecture and token normalization), but in most cases Authorino should be seen as a last-mile component that provides decoupled identity verification and authorization policy enforcement to protected services in request-time. In this sense, the OAuth grant is a pre-flight exchange that happens once and as direct and safe as possible, whereas auth enforcement is kept lightweight and efficient.
Where does Authorino store users and roles? Authorino does not store users, roles, role bindings, access control lists, or any raw authorization data. Authorino handles policies, where even these policies can be stored elsewhere (as opposed to stated inline inside of an Authorino AuthConfig
CR).
Authorino evaluates policies for stateless authorization requests. Any additional context is either resolved from the provided payload or static definitions inside the policies. That includes extracting user information from a JWT or client TLS certificate, requesting user metadata from opaque authentication tokens (e.g. API keys) to the trusted sources actually storing that content, obtaining synchronous HTTP metadata from services, etc.
In the case of authentication with API keys, as well as its derivative to model HTTP Basic Auth, user data are stored in Kubernetes Secret
s. The secret's keys, annotations and labels are usually the structures used to organize the data that later a policy evaluated in Authorino may require. Strictly, those are not Authorino data structures.
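For instance, an API key credential can be modeled as a Secret like the sketch below, mirroring the API-key examples earlier in these docs (the exact names and label selectors are whatever your AuthConfig declares):
apiVersion: v1\nkind: Secret\nmetadata:\n name: bob-api-key\n labels:\n authorino.kuadrant.io/managed-by: authorino\n annotations:\n secret.kuadrant.io/user-id: bob\nstringData:\n api_key: IAMBOB\ntype: Opaque\n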
Can't I just use Envoy JWT Authentication and RBAC filters? Envoy's JWT Authentication works pretty much similar to Authorino's JOSE/JWT verification and validation for OpenID Connect. In both cases, the JSON Web Key Sets (JWKS) to verify the JWTs are auto-loaded and cached to be used in request-time. Moreover, you can configure for details such as where to extract the JWT from the HTTP request (header, param or cookie) and do some cool tricks regarding how dynamic metadata based on JWT claims can be injected to consecutive filters in the chain.
However, in terms of authorization, while Envoy's implementation essentially allows to check for the list of audiences (aud
JWT claim), Authorino opens up for a lot more options such as pattern-matching rules with operators and conditionals, built-in OPA and other methods of evaluating authorization policies.
Authorino also allows combining JWT authentication with other types of authentication to support different sources of identity and groups of users, such as API keys, Kubernetes tokens, OAuth opaque tokens, etc.
In summary, Envoy's JWT Authentication and Envoy RBAC filter are excellent solutions for simple use-cases where JWTs from a single issuer are the only authentication method you plan to support and limited or no authorization rules suffice. On the other hand, if you need to integrate more identity sources, different types of authentication, authorization policies, etc, you might want to consider Authorino.
Should I use Authorino if I already have Istio configured? Istio is a great solution for managing service meshes. It delivers an excellent platform with an interesting layer of abstraction on top of Envoy proxy's virtual omnipresence within the mesh.
There are lots of similarities, but also complementarity between Authorino and Istio and Istio Authorization in special.
Istio provides a simple way to enable features that are, in many cases, features of Envoy, such as authorization based on JWTs, authorization based on attributes of the request, and activation of external authorization services, without having to deal with complex Envoy config files. See Kuadrant for a similar approach, nonetheless leveraging features of Istio as well.
Authorino is an Envoy-compatible external authorization service. One can use Authorino with or without Istio.
In particular, Istio Authorization Policies can be seen, in terms of functionality and expressiveness, as a subset of one type of authorization policies supported by Authorino, the pattern-matching authorization policies. While Istio, however, is heavily focused on specific use cases of API Management, offering a relatively limited list of supported attribute conditions, Authorino is more generic, allowing to express authorization rules for a wider spectrum of use cases \u2013 ACLs, RBAC, ABAC, etc, pretty much counting on any attribute of the Envoy payload, identity object and external metadata available.
Authorino also provides built-in OPA authorization, several other methods of authentication and identity verification (e.g. Kubernetes token validation, API key-based authentication, OAuth token introspection, OIDC-discoverable JWT verification, etc), and features like fetching of external metadata (HTTP services, OIDC userinfo, UMA resource data), token normalization, wristband tokens and dynamic responses. These all can be used independently or combined, in a simple and straightforward Kubernetes-native fashion.
In summary, one might value Authorino when looking for a policy enforcer that offers:
- multiple supported methods and protocols for rather hybrid authentication, encompassing future and legacy auth needs;
- broader expressiveness and more functionalities for the authorization rules;
- authentication and authorization in one single declarative manifest;
- capability to fetch auth metadata from external sources on-the-fly;
- built-in OPA module;
- easy token normalization and/or aiming for Edge Authentication Architecture (EAA).
The good news is that, if you have Istio configured, then you have Envoy and the whole platform for wiring Authorino up if you want to. \ud83d\ude09
Do I have to learn OPA/Rego language to use Authorino? No, you do not. However, if you are comfortable with Rego from Open Policy Agent (OPA), there are some quite interesting things you can do in Authorino, just as you would in any OPA server or OPA plugin, but leveraging Authorino's built-in OPA module instead. Authorino's OPA module is compiled as part of Authorino's code directly from the Golang packages, and imposes no extra latency to the evaluation of your authorization policies. Even the policies themselves are pre-compiled in reconciliation-time, for fast evaluation afterwards, in request-time.
On the other hand, if you do not want to learn Rego or in any case would like to combine it with declarative and Kubernetes-native authN/authZ spec for your services, Authorino does complement OPA with at least two other methods for expressing authorization policies \u2013 i.e. pattern-matching authorization and Kubernetes SubjectAccessReview, the latter allowing to rely completely on the Kubernetes RBAC.
You can break down, mix, and combine these methods and technologies in as many authorization policies as you want, potentially applying them according to specific conditions. Authorino will trigger the evaluation of concurrent policies in parallel, aborting the context if any of the processes denies access.
Authorino also packages well-established industry standards and protocols for identity verification (JOSE/JWT validation, OAuth token introspection, Kubernetes TokenReview) and ad-hoc request-time metadata fetching (OIDC userinfo, User-Managed Access (UMA)), and corresponding layers of caching, without which such functionalities would have to be implemented by code.
Can I use Authorino to protect non-REST APIs? Yes, you can. In principle, the API format (REST, gRPC, GraphQL, etc) should not matter for the authN/authZ enforcer. There are a couple of points to consider though.
While REST APIs are designed in a way that, in most cases, the information needed for the evaluation of authorization policies is available in the metadata of the HTTP request (method, path, headers), other API formats will quite often require processing of the HTTP body. By default, Envoy's external authorization HTTP filter will not forward the body of the request to Authorino; to change that, enable the with_request_body
option in the Envoy configuration for the external authorization filter. E.g.:
with_request_body:\n max_request_bytes: 1024\n allow_partial_message: true\n pack_as_bytes: true\n
Additionally, when enabling the request body to be passed in the payload to Authorino, parsing of the content becomes a concern as well. Authorino provides easy access to the attributes of the HTTP request, parsed as part of the Authorization JSON; however, the body of the request is passed as a string and must be parsed by the user according to each case.
Check out Authorino OPA authorization and the Rego Encoding functions for options to parse serialized JSON, YAML and URL-encoded params. For XML transformation, an external parsing service connected via Authorino's HTTP GET/GET-by-POST external metadata might be required.
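For illustration, a minimal sketch of an inline Rego policy that parses a JSON request body \u2013 the policy name and the checked attribute are hypothetical, and with_request_body must be enabled in Envoy as shown above:
spec:\n  authorization:\n    \"parse-json-body\":\n      opa:\n        rego: |\n          body := json.unmarshal(input.context.request.http.body) # the body arrives as a raw string\n          allow { body.operation == \"read\" } # hypothetical rule over a parsed attribute\n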
Can I run Authorino other than on Kubernetes? As of today, no \u2013 or at least not in a way that would suit production requirements.
Do I have to be admin of the cluster to install Authorino? To install the Authorino Custom Resource Definition (CRD) and to define the cluster roles required by the Authorino service, admin privileges on the Kubernetes cluster are required. This step happens only once per cluster and is usually equivalent to installing the Authorino Operator.
Thereafter, deploying instances of the Authorino service and applying AuthConfig
custom resources to a namespace depend on the permissions set by the cluster administrator \u2013 either directly by editing the bindings in the cluster's RBAC, or via options of the operator. In most cases, developers will be granted permissions to create and manage AuthConfig
s, and sometimes to deploy their own instances of Authorino.
Is it OK to store AuthN/AuthZ configs as Kubernetes objects? Authorino's API ticks all the boxes for being aggregated into the Kubernetes cluster APIs, so using a Custom Resource Definition (CRD) and the Operator pattern has always been an easy design decision.
By merging the definitions of service authN/authZ into the control plane, Authorino AuthConfig
resources can be thought of as extensions of the specs of the desired state of services regarding data flow security. The Authorino custom controllers, built into the authorization service, are the agents that read from that desired state and reconcile the processes operating in the data plane.
Authorino is declarative and seamless for developers and cluster administrators who manage the state of security of the applications running in the cluster and are used to tools such as kubectl, the Kubernetes UI and its dashboards. Instead of learning yet another configuration API format, Authorino users can jump straight to applying and editing YAML or JSON structures they already know, where things such as spec, status, namespace and labels have the meaning they are expected to have, and docs are as close as kubectl explain. Moreover, Authorino does not pile up redundant layers of APIs, event processing, RBAC, transformation and validation webhooks, etc. It is Kubernetes at its best.
In terms of scale, Authorino AuthConfig
s should grow proportionally to the number of protected services, virtually limited by nothing but the Kubernetes API data storage, while namespace division and label selectors help scale horizontally and keep the load distributed.
In other words, there are lots of benefits to using Kubernetes custom resources and custom controllers, and unless you are planning on bursting your server with more services than it can keep record of, it is totally \ud83d\udc4d to store your AuthN/AuthZ configs as cluster API objects.
Can I use Authorino for rate limiting? You can, but you shouldn't. Check out instead Limitador, for simple and efficient global rate limiting. Combine it with Authorino and Authorino's support for Envoy Dynamic Metadata for authenticated rate limiting.
"},{"location":"authorino/#benchmarks","title":"Benchmarks","text":"Configuration of the tests (Authorino features):
| Performance test | Identity | Metadata | Authorization | Response |
|---|---|---|---|---|
| ReconcileAuthConfig | OIDC/JWT | UserInfo, UMA | OPA (inline Rego) | - |
| AuthPipeline | OIDC/JWT | - | JSON pattern-matching (JWT claim check) | - |
| APIKeyAuthn | API key | N/A | N/A | N/A |
| JSONPatternMatchingAuthz | N/A | N/A | JSON pattern-matching | N/A |
| OPAAuthz | N/A | N/A | OPA (inline Rego) | N/A |
Platform: linux/amd64 | CPU: Intel\u00ae Xeon\u00ae Platinum 8370C 2.80GHz | Cores: 1, 4, 10
Results:
ReconcileAuthConfig:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 1.533m \u00b1 2% 264.4Ki \u00b1 0% 6.470k \u00b1 0%\n*-4 1.381m \u00b1 6% 264.5Ki \u00b1 0% 6.471k \u00b1 0%\n*-10 1.563m \u00b1 5% 270.2Ki \u00b1 0% 6.426k \u00b1 0%\ngeomean 1.491m 266.4Ki 6.456k\n\nAuthPipeline:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 388.0\u00b5 \u00b1 2% 80.70Ki \u00b1 0% 894.0 \u00b1 0%\n*-4 348.4\u00b5 \u00b1 5% 80.67Ki \u00b1 2% 894.0 \u00b1 3%\n*-10 356.4\u00b5 \u00b1 2% 78.97Ki \u00b1 0% 860.0 \u00b1 0%\ngeomean 363.9\u00b5 80.11Ki 882.5\n\nAPIKeyAuthn:\n\n \u2502 sec/op \u2502 B/op \u2502 allocs/op \u2502\n\n* 3.246\u00b5 \u00b1 1% 480.0 \u00b1 0% 6.000 \u00b1 0%\n*-4 3.111\u00b5 \u00b1 0% 480.0 \u00b1 0% 6.000 \u00b1 0%\n*-10 3.091\u00b5 \u00b1 1% 480.0 \u00b1 0% 6.000 \u00b1 0%\ngeomean 3.148\u00b5 480.0 6.000\n\nOPAAuthz vs JSONPatternMatchingAuthz:\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 sec/op \u2502 sec/op vs base \u2502\n\n* 87.469\u00b5 \u00b1 1% 1.797\u00b5 \u00b1 1% -97.95% (p=0.000 n=10)\n*-4 95.954\u00b5 \u00b1 3% 1.766\u00b5 \u00b1 0% -98.16% (p=0.000 n=10)\n*-10 96.789\u00b5 \u00b1 4% 1.763\u00b5 \u00b1 0% -98.18% (p=0.000 n=10)\ngeomean 93.31\u00b5 1.775\u00b5 -98.10%\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 B/op \u2502 B/op vs base \u2502\n\n* 28826.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\n*-4 28844.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\n*-10 28862.00 \u00b1 0% 64.00 \u00b1 0% -99.78% (p=0.000 n=10)\ngeomean 28.17Ki 64.00 -99.78%\n\n \u2502 OPAAuthz \u2502 JSONPatternMatchingAuthz \u2502\n \u2502 allocs/op \u2502 allocs/op vs base \u2502\n\n* 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\n*-4 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\n*-10 569.000 \u00b1 0% 2.000 \u00b1 0% -99.65% (p=0.000 n=10)\ngeomean 569.0 2.000 -99.65%\n
"},{"location":"authorino/#contributing","title":"Contributing","text":"If you are interested in contributing to Authorino, please refer to the Developer's guide for info about the stack and requirements, workflow, policies and Code of Conduct.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"authorino/docs/","title":"Documentation","text":""},{"location":"authorino/docs/#getting-started","title":"Getting started","text":""},{"location":"authorino/docs/#terminology","title":"Terminology","text":""},{"location":"authorino/docs/#architecture","title":"Architecture","text":""},{"location":"authorino/docs/#feature-description","title":"Feature description","text":""},{"location":"authorino/docs/#user-guides","title":"User guides","text":""},{"location":"authorino/docs/#developers-guide","title":"Developer\u2019s guide","text":""},{"location":"authorino/docs/architecture/","title":"Architecture","text":""},{"location":"authorino/docs/architecture/#overview","title":"Overview","text":"There are a few concepts to understand Authorino's architecture. The main components are: Authorino, Envoy and the Upstream service to be protected. Envoy proxies requests to the configured virtual host upstream service, first contacting with Authorino to decide on authN/authZ.
The topology can vary from centralized proxy and centralized authorization service, to dedicated sidecars, with the nuances in between. Read more about the topologies in the Topologies section below.
Authorino is deployed using the Authorino Operator, from an Authorino
Kubernetes custom resource. Then, from another kind of custom resource, the AuthConfig
CRs, each Authorino instance reads the exact rules of authN/authZ to enforce for each protected host and adds them to its index ("index reconciliation").
Everything that the AuthConfig reconciler can fetch at reconciliation-time is stored in the index. This is the case for static parameters such as signing keys, authentication secrets and authorization policies from external policy registries.
AuthConfig
s can refer to identity providers (IdP) and trusted auth servers whose access tokens will be accepted to authenticate to the protected host. Consumers obtain an authentication token (short-lived access token or long-lived API key) and send those in the requests to the protected service.
When Authorino is triggered by Envoy via the gRPC interface, it starts evaluating the Auth Pipeline, i.e. it applies to the request the parameters to verify the identity and to enforce authorization, as found in the index for the requested host (See host lookup for details).
Apart from static rules, these parameters can include instructions to contact external identity verifiers, external sources of metadata and policy decision points (PDPs) online.
On every request, Authorino's \"working memory\" is called Authorization JSON, a data structure that holds information about the context (the HTTP request) and objects from each phase of the auth pipeline: i.e., authentication verification (phase i), ad-hoc metadata fetching (phase ii), authorization policy enforcement (phase iii), dynamic response (phase iv), and callbacks (phase v). The evaluators in each of these phases can both read and write from the Authorization JSON for dynamic steps and decisions of authN/authZ.
"},{"location":"authorino/docs/architecture/#topologies","title":"Topologies","text":"Typically, upstream APIs are deployed to the same Kubernetes cluster and namespace where the Envoy proxy and Authorino is running (although not necessarily). Whatever is the case, Envoy must be proxying to the upstream API (see Envoy's HTTP route components and virtual hosts) and pointing to Authorino in the external authorization filter.
This can be achieved with different topologies:
- Envoy can be a centralized gateway with one dedicated instance of Authorino, proxying to one or more upstream services
- Envoy can be deployed as a sidecar of each protected service, but still contacting a centralized Authorino authorization service
- Both Envoy and Authorino deployed as sidecars of the protected service, restricting all communication between them to localhost
Each topology above calls for different security measures.
"},{"location":"authorino/docs/architecture/#centralized-gateway","title":"Centralized gateway","text":"Recommended in the protected services to validate the origin of the traffic. It must have been proxied by Envoy. See Authorino JSON injection for an extra validation option using a shared secret passed in HTTP header.
"},{"location":"authorino/docs/architecture/#centralized-authorization-service","title":"Centralized authorization service","text":"Protected service should only listen on localhost
and all traffic can be considered safe.
"},{"location":"authorino/docs/architecture/#sidecars","title":"Sidecars","text":"Recommended namespaced
instances of Authorino with fine-grained label selectors to avoid unnecessary caching of AuthConfig
s.
Apart from that, protected service should only listen on localhost
and all traffic can be considered safe.
"},{"location":"authorino/docs/architecture/#cluster-wide-vs-namespaced-instances","title":"Cluster-wide vs. Namespaced instances","text":"Authorino instances can run in either cluster-wide or namespaced mode.
Namespace-scoped instances only watch resources (AuthConfig
s and Secret
s) created in a given namespace. This deployment mode does not require admin privileges over the Kubernetes cluster to deploy the instance of the service (given Authorino's CRDs have been installed beforehand, such as when Authorino is installed using the Authorino Operator).
Cluster-wide deployment mode, by contrast, deploys instances of Authorino that watch resources across the entire cluster, consolidating all resources into a multi-namespace index of auth configs. Admin privileges over the Kubernetes cluster are required to deploy Authorino in cluster-wide mode.
Be careful to avoid superposition when combining multiple Authorino instances and instance modes in the same Kubernetes cluster. Apart from caching unnecessary auth config data in the instances depending on your routing settings, the leaders of each instance (set of replicas) may compete for updating the status of the custom resources that are reconciled. See Resource reconciliation and status update for more information.
If necessary, use label selectors to narrow down the space of resources watched and reconciled by each Authorino instance. Check out the Sharding section below for details.
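For illustration, a sketch of an Authorino custom resource (field names as per the Authorino Operator) declaring a namespaced instance whose reconciliation space is narrowed down by a label selector \u2013 the namespace and selector values are hypothetical:
apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\n  namespace: my-apps # hypothetical; a namespaced instance only watches resources created here\nspec:\n  clusterWide: false # namespaced mode\n  authConfigLabelSelectors: authorino.kuadrant.io/managed-by=authorino # sharding by label selector\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\n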
"},{"location":"authorino/docs/architecture/#the-authorino-authconfig-custom-resource-definition-crd","title":"The Authorino AuthConfig
Custom Resource Definition (CRD)","text":"The desired protection for a service is declaratively stated by applying an AuthConfig
Custom Resource to the Kubernetes cluster running Authorino.
An AuthConfig
resource typically looks like the following:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n # The list of public host names of the services protected by this AuthConfig resource.\n # Authorino uses the host name provided in the payload of external authorization request to lookup for the corresponding AuthConfig to enforce.\n # Hostname collisions are prevented by rejecting to index a hostname already taken by another AuthConfig.\n # Format: hostname[:port]\n hosts:\n\n - my-api.io:443 # north-south traffic\n - my-api.ns.svc.cluster.local # east-west traffic\n\n # Set of stored named patterns to be reused in conditions and pattern-matching authorization rules\n patterns: {\"name\" \u2192 {selector, operator, value}, \u2026}\n\n # Top-level conditions for the AuthConfig to be enforced.\n # If omitted, the AuthConfig will be enforced at all requests.\n # If present, all conditions must match for the AuthConfig to be enforced; otherwise, Authorino skips the AuthConfig and returns to the auth request with status OK.\n when: [{selector, operator, value | named pattern ref}, \u2026]\n\n # List of one or more trusted sources of identity:\n # - Configurations to verify JSON Web Tokens (JWTs) issued by an OpenID Connect (OIDC) server\n # - Endpoints for OAuth 2.0 token introspection\n # - Attributes for the Kubernetes `TokenReview` API\n # - Label selectors for API keys (stored in Kubernetes `Secret`s)\n # - Label selectors trusted x509 issuer certificates (stored in Kubernetes `Secret`s)\n # - Selectors for plain identity objects supplied in the payload of the authorization request\n # - Anonymous access configs\n authentication: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of sources of external metadata for the authorization (optional):\n # - Endpoints for HTTP GET or GET-by-POST requests\n # - OIDC UserInfo endpoints (associated with an OIDC token issuer specified in the authentication configs)\n # - User-Managed Access (UMA) resource registries\n metadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of authorization policies to be enforced (optional):\n # - Pattern-matching rules (e.g. `context.request.http.path eq '/pets'`)\n # - Open Policy Agent (OPA) inline or external Rego policies\n # - Attributes for the Kubernetes `SubjectAccessReview` API\n # \u2013 Attributes for authorization with an external SpiceDB server\n authorization: {\"name\" \u2192 {\u2026}, \u2026}\n\n # Customization to the response to the external authorization request (optional)\n response:\n # List of dynamic response elements into the request on success authoization (optional):\n # - Plain text\n # - JSON objects\n # - Festival Wristbands (signed JWTs issued by Authorino)\n success:\n # List of HTTP headers to inject into the request post-authorization (optional):\n headers: {\"name\" \u2192 {\u2026}, \u2026}\n\n # List of Envoy Dynamic Metadata to inject into the request post-authorization (optional):\n dynamicMetadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n # Custom HTTP status code, message and headers to replace the default `401 Unauthorized` response (optional)\n unauthenticated:\n code: 302\n message: Redirecting to login\n headers:\n \"Location\":\n value: https://my-app.io/login\n\n # Custom HTTP status code, message and headers to replace the default `and `403 Forbidden` response (optional)\n unauthorized: {code, message, headers, body}\n\n # List of callback targets:\n # - Endpoints for HTTP requests\n callbacks: {\"name\" \u2192 {\u2026}, \u2026}\n
Check out the OAS of the AuthConfig
CRD for a formal specification of the options for authentication
verification, external metadata
fetching, authorization
policies, and dynamic response
, as well as any other host protection capability implemented by Authorino.
You can also read the specification from the CLI using the kubectl explain
command. The Authorino CRD is required to have been installed in the Kubernetes cluster. E.g. kubectl explain authconfigs.spec.authentication.overrides
.
A complete description of supported features and corresponding configuration options within an AuthConfig
CR can be found in the Features page.
More concrete examples of AuthConfig
s for specific use-cases can be found in the User guides.
"},{"location":"authorino/docs/architecture/#resource-reconciliation-and-status-update","title":"Resource reconciliation and status update","text":"The instances of the Authorino authorization service workload, following the Operator pattern, watch events related to the AuthConfig
custom resources, to build and reconcile an in-memory index of configs. Whenever a replica receives traffic for an authorization request, it looks up in the index of AuthConfig
s and then triggers the \"Auth Pipeline\", i.e. enforces the associated auth spec onto the request.
An instance can be a single authorization service workload or a set of replicas. All replicas watch and reconcile the same set of resources that match the --auth-config-label-selector
and --secret-label-selector
configuration options. (See both Cluster-wide vs. Namespaced instances and Sharding, for details about defining the reconciliation space of Authorino instances.)
The above means that all replicas of an Authorino instance should be able to receive traffic for authorization requests.
Among the multiple replicas of an instance, Authorino elects one replica to be leader. The leader is responsible for updating the status of reconciled AuthConfig
s. If the leader eventually becomes unavailable, the instance will automatically elect another replica to take its place as the new leader.
The status of an AuthConfig
tells whether the resource is \"ready\" (i.e. indexed). It also includes summary information regarding the numbers of authentication configs, metadata configs, authorization configs and response configs within the spec, as well as whether Festival Wristband tokens are being issued by the Authorino instance as specified.
Apart from watching events related to AuthConfig
custom resources, Authorino also watches events related to Kubernetes Secret
s, as part of Authorino's API key authentication feature. Secret
resources that store API keys are linked to their corresponding AuthConfig
s in the index. Whenever the Authorino instance detects a change in the set of API key Secret
s linked to an AuthConfig
, the instance reconciles the index.
Authorino only watches events related to Secret
s whose metadata.labels
match the label selector --secret-label-selector
of the Authorino instance. The default values of the label selector for Kubernetes Secret
s representing Authorino API keys is authorino.kuadrant.io/managed-by=authorino
.
"},{"location":"authorino/docs/architecture/#the-auth-pipeline-aka-enforcing-protection-in-request-time","title":"The \"Auth Pipeline\" (aka: enforcing protection in request-time)","text":"In each request to the protected API, Authorino triggers the so-called \"Auth Pipeline\", a set of configured evaluators that are organized in a 5-phase pipeline:
- (i) Authentication phase: at least one source of identity (i.e., one authentication config) must resolve the supplied credential in the request into a valid identity or Authorino will otherwise reject the request as unauthenticated (401 HTTP response status).
- (ii) Metadata phase: optional fetching of additional data from external sources, to add up to context and identity information, and used in authorization policies, dynamic responses and callback requests (phases iii to v).
- (iii) Authorization phase: all unskipped policies must evaluate to a positive result (\"authorized\"), or Authorino will otherwise reject the request as unauthorized (403 HTTP response code).
- (iv) Response phase \u2013 Authorino builds all user-defined response items (dynamic JSON objects and/or Festival Wristband OIDC tokens), which are supplied back to the external authorization client within added HTTP headers or as Envoy Dynamic Metadata
- (v) Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints.
The phases run sequentially, from (i) to (v), while the evaluators within each phase are triggered concurrently, or as prioritized. The Authentication phase (i) is the only one required to list at least one evaluator (i.e. 1+ authentication configs); the Metadata, Authorization and Response phases can have any number of evaluators (including zero) and can even be omitted.
"},{"location":"authorino/docs/architecture/#host-lookup","title":"Host lookup","text":"Authorino reads the request host from Attributes.Http.Host
of Envoy's CheckRequest
type, and uses it as key to lookup in the index of AuthConfig
s, matched against spec.hosts
.
Alternatively to Attributes.Http.Host
, a host
entry can be supplied in the Attributes.ContextExtensions
map of the external authorino request. This will take precedence before the host attribute of the HTTP request.
The host
context extension is useful to support use cases such as of path prefix-based lookup and wildcard subdomains lookup with lookup strongly dictated by the external authorization client (e.g. Envoy), which often knows about routing and the expected AuthConfig
to enforce beyond what Authorino can infer strictly based on the host name.
Wildcards can also be used in the host names specified in the AuthConfig
, resolved by Authorino. E.g. if *.pets.com
is in spec.hosts
, Authorino will match the concrete host names dogs.pets.com
, cats.pets.com
, etc. In case of multiple possible matches, Authorino will try the longest match first (in terms of host name labels) and fall back to the closest wildcard upwards in the domain tree (if any).
When more than one host name is specified in the AuthConfig
, all of them can be used as key, i.e. all of them can be requested in the authorization request and will be mapped to the same config.
Example. Host lookup with wildcards.
The domain tree in this example induces the following relation:
- foo.nip.io \u2192 authconfig-1 (matches *.io)
- talker-api.nip.io \u2192 authconfig-2 (matches talker-api.nip.io)
- dogs.pets.com \u2192 authconfig-2 (matches *.pets.com)
- api.acme.com \u2192 authconfig-3 (matches api.acme.com)
- www.acme.com \u2192 authconfig-4 (matches *.acme.com)
- foo.org \u2192 404 Not found
The host can include the port number (i.e. hostname:port
) or just the host name. Authorino will first try to find in the index a config associated with hostname:port
, as supplied in the authorization request; if the index misses an entry for hostname:port
, Authorino will then remove the :port
suffix and repeat the lookup using just hostname
as key. This provides implicit support for multiple port numbers for a same host without having to list all combinations in the AuthConfig
.
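For example, an authorization request carrying the host my-api.io:8443 first looks up the index key my-api.io:8443 and, in case of a miss, falls back to looking up my-api.io.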
"},{"location":"authorino/docs/architecture/#avoiding-host-name-collision","title":"Avoiding host name collision","text":"Authorino tries to prevent host name collision between AuthConfig
s by refusing to link in the index any AuthConfig
and host name if the host name is already linked to a different AuthConfig
in the index. This was intentionally designed to prevent users from superseding each other's AuthConfig
s, partially or fully, by just picking the same host names or overlapping host names as others.
When wildcards are involved, a host name that matches a host wildcard already linked in the index to another AuthConfig
will be considered taken, and therefore the newest AuthConfig
will not be linked to that host.
This behavior can be disabled to allow AuthConfig
s to partially supersede each others' host names (limited to strict host subsets), by supplying the --allow-superseding-host-subsets
command-line flag when running the Authorino instance.
"},{"location":"authorino/docs/architecture/#the-authorization-json","title":"The Authorization JSON","text":"On every Auth Pipeline, Authorino builds the Authorization JSON, a \"working-memory\" data structure composed of context
(information about the request, as supplied by the Envoy proxy to Authorino) and auth
(objects resolved in phases (i) to (v) of the pipeline). The evaluators of each phase can read from the Authorization JSON and implement dynamic properties and decisions based on its values.
At phase (iii), the authorization evaluators count on an Authorization JSON payload that looks like the following:
// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n \"context\": { // the input from the proxy\n \"origin\": {\u2026},\n \"request\": {\n \"http\": {\n \"method\": \"\u2026\",\n \"headers\": {\u2026},\n \"path\": \"/\u2026\",\n \"host\": \"\u2026\",\n \u2026\n }\n }\n },\n \"auth\": {\n \"identity\": {\n // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n },\n \"metadata\": {\n // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n }\n }\n}\n
The policies evaluated can use any data from the authorization JSON to define authorization rules.
After phase (iii), Authorino appends to the authorization JSON the results of this phase as well, and the payload available for phase (iv) becomes:
// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n \"context\": { // the input from the proxy\n \"origin\": {\u2026},\n \"request\": {\n \"http\": {\n \"method\": \"\u2026\",\n \"headers\": {\u2026},\n \"path\": \"/\u2026\",\n \"host\": \"\u2026\",\n \u2026\n }\n }\n },\n \"auth\": {\n \"identity\": {\n // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n },\n \"metadata\": {\n // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n },\n \"authorization\": {\n // each authorization policy result resolved by the evaluators of phase (iii), by name of the evaluator\n }\n }\n}\n
Festival Wristbands and Dynamic JSON responses can include dynamic values (custom claims/properties) fetched from the authorization JSON. These can be returned to the external authorization client in added HTTP headers or as Envoy Well Known Dynamic Metadata. Check out Custom response features for details.
For information about reading and fetching data from the Authorization JSON (syntax, functions, etc), check out JSON paths.
"},{"location":"authorino/docs/architecture/#raw-http-authorization-interface","title":"Raw HTTP Authorization interface","text":"Besides providing the gRPC authorization interface \u2013 that implements the Envoy gRPC authorization server \u2013, Authorino also provides another interface for raw HTTP authorization. This second interface responds to GET
and POST
HTTP requests sent to :5001/check
, and is suitable for other forms of integration, such as:
- using Authorino as Kubernetes ValidatingWebhook service (example);
- other HTTP proxies and API gateways;
- old versions of Envoy incompatible with the latest version of gRPC external authorization protocol (Authorino is based on v3.19.1 of Envoy external authorization API)
In the raw HTTP interface, the host used to lookup for an AuthConfig
must be supplied in the Host
HTTP header of the request. Other attributes of the HTTP request are also passed in the context to evaluate the AuthConfig
, including the body of the request.
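For illustration, a sketch of a raw HTTP authorization check \u2013 the service host name is hypothetical and TLS is assumed disabled on the listener:
curl -H \"Host: my-api.io\" -H \"Authorization: Bearer $ACCESS_TOKEN\" http://authorino-authorino-authorization:5001/check -i\n# HTTP/1.1 200 OK \u2013 access granted; otherwise 401/403 with the denial response declared in the AuthConfig\n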
"},{"location":"authorino/docs/architecture/#caching","title":"Caching","text":""},{"location":"authorino/docs/architecture/#openid-connect-and-user-managed-access-configs","title":"OpenID Connect and User-Managed Access configs","text":"OpenID Connect and User-Managed Access configurations, discovered usually at reconciliation-time from well-known discovery endpoints.
Cached individual OpenID Connect configurations discovered by Authorino can be configured to be auto-refreshed, by setting the corresponding spec.authentication.jwt.ttl
field in the AuthConfig (given in seconds, default: 0
\u2013 i.e. no cache update).
"},{"location":"authorino/docs/architecture/#json-web-keys-jwks-and-json-web-key-sets-jwks","title":"JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS)","text":"JSON signature verification certificates linked by discovered OpenID Connect configurations, fetched usually at reconciliation-time.
"},{"location":"authorino/docs/architecture/#revoked-access-tokens","title":"Revoked access tokens","text":"Not implemented - In analysis (#19) Caching of access tokens identified and or notified as revoked prior to expiration.
"},{"location":"authorino/docs/architecture/#external-metadata","title":"External metadata","text":"Not implemented - Planned (#21) Caching of resource data obtained in previous requests.
"},{"location":"authorino/docs/architecture/#compiled-rego-policies","title":"Compiled Rego policies","text":"Performed automatically by Authorino at reconciliation-time for the authorization policies based on the built-in OPA module.
Precompiled and cached individual Rego policies originally pulled by Authorino from external registries can be configured to be auto-refreshed, by setting the corresponding spec.authorization.opa.externalRegistry.ttl
field in the AuthConfig (given in seconds, default: 0
\u2013 i.e. no cache update).
"},{"location":"authorino/docs/architecture/#repeated-requests","title":"Repeated requests","text":"Not implemented - In analysis (#20) For consecutive requests performed, within a given period of time, by a same user that request for a same resource, such that the result of the auth pipeline can be proven that would not change.
"},{"location":"authorino/docs/architecture/#sharding","title":"Sharding","text":"By default, Authorino instances will watch AuthConfig
CRs in the entire space (namespace or entire cluster; see Cluster-wide vs. Namespaced instances for details). To support combining multiple Authorino instances and instance modes in the same Kubernetes cluster, and yet avoiding superposition between the instances (i.e. multiple instances reconciling the same AuthConfig
s), Authorino offers support for data sharding, i.e. to horizontally narrow down the space of reconciliation of an Authorino instance to a subset of that space.
The benefits of limiting the space of reconciliation of an Authorino instance include avoiding unnecessary caching and workload in instances that do not receive corresponding traffic (according to your routing settings), and preventing the leaders of multiple instances (sets of replicas) from competing over resource status updates (see Resource reconciliation and status update for details).
Use-cases for sharding of AuthConfig
s:
- Horizontal load balancing of traffic of authorization requests
- Supporting managed centralized instances of Authorino for API owners who create and maintain their own
AuthConfig
s within their own user namespaces.
Authorino's custom controllers filter the AuthConfig
-related events to be reconciled using Kubernetes label selectors, defined for the Authorino instance via --auth-config-label-selector
command-line flag. By default, --auth-config-label-selector
is empty, meaning all AuthConfig
s in the space are watched; this variable can be set to any value parseable as a valid label selector, causing Authorino to then watch only events of AuthConfig
s whose metadata.labels
match the selector.
The following are all valid examples of AuthConfig
label selector filters:
--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino,other-label=other-value\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by in (authorino,kuadrant)\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by!=authorino-v0.4\"\n--auth-config-label-selector=\"!disabled\"\n
"},{"location":"authorino/docs/architecture/#rbac","title":"RBAC","text":"The table below describes the roles and role bindings defined by the Authorino service:
| Role | Kind | Scope(*) | Description | Permissions |
|---|---|---|---|---|
| authorino-manager-role | ClusterRole | C/N | Role of the Authorino manager service | Watch and reconcile AuthConfigs and Secrets |
| authorino-manager-k8s-auth-role | ClusterRole | C/N | Role for the Kubernetes auth features | Create TokenReviews and SubjectAccessReviews (Kubernetes auth) |
| authorino-leader-election-role | Role | N | Leader election role | Create/update the ConfigMap used to coordinate which replica of Authorino is the leader |
| authorino-authconfig-editor-role | ClusterRole | - | AuthConfig editor | R/W AuthConfigs; Read AuthConfig/status |
| authorino-authconfig-viewer-role | ClusterRole | - | AuthConfig viewer | Read AuthConfigs and AuthConfig/status |
| authorino-proxy-role | ClusterRole | C/N | Role of the kube-rbac-proxy sidecar | Create TokenReviews and SubjectAccessReviews to check permissions to the /metrics endpoint |
| authorino-metrics-reader | ClusterRole | - | Metrics reader | GET /metrics |
(*) C - Cluster-wide | N - Authorino namespace | C/N - Cluster-wide or Authorino namespace (depending on the deployment mode).
"},{"location":"authorino/docs/architecture/#observability","title":"Observability","text":"Please refer to the Observability user guide for info on Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.
"},{"location":"authorino/docs/code_of_conduct/","title":"Code of conduct","text":""},{"location":"authorino/docs/code_of_conduct/#code-of-conduct","title":"Code of Conduct","text":"Autorino follows the Kuadrant Community Code of Conduct, which is based on the CNCF Code of Conduct.
Please refer to this page for a description of the standards and values we stand for in our relationship with the community.
"},{"location":"authorino/docs/contributing/","title":"Developer's Guide","text":""},{"location":"authorino/docs/contributing/#technology-stack-for-developers","title":"Technology stack for developers","text":"Minimum requirements to contribute to Authorino are:
- Golang v1.21+
- Docker
Authorino's code was originally bundled using the Operator SDK (v1.9.0).
The following tools can be installed as part of the development workflow:
-
Installed with go install
to the $PROJECT_DIR/bin
directory:
- controller-gen: for building custom types and manifests
- Kustomize: for assembling flavoured manifests and installing/deploying
- setup-envtest: for running the tests \u2013 extra tools installed to
./testbin
- benchstat: for human-friendly test benchmark reports
- mockgen: to generate mocks for tests \u2013 e.g.
./bin/mockgen -source=pkg/auth/auth.go -destination=pkg/auth/mocks/mock_auth.go
- Kind: for deploying a containerized Kubernetes cluster for integration testing purposes
-
Other recommended tools to have installed:
- jq
- yq
- gnu-sed
"},{"location":"authorino/docs/contributing/#workflow","title":"Workflow","text":""},{"location":"authorino/docs/contributing/#check-the-issues","title":"Check the issues","text":"Start by checking the list of issues in GitHub.
In case you want to contribute with an idea for enhancement, a bug fix, or question, please make sure to describe the issue so we can start a conversation together and help you find the best way to get your contribution merged.
"},{"location":"authorino/docs/contributing/#clone-the-repo-and-setup-the-local-environment","title":"Clone the repo and setup the local environment","text":"Fork/clone the repo:
git clone git@github.com:kuadrant/authorino.git && cd authorino\n
Download the Golang dependencies:
make vendor\n
For additional automation provided, check:
make help\n
"},{"location":"authorino/docs/contributing/#make-your-changes","title":"Make your changes","text":"Good changes...
- follow the Golang conventions
- have proper test coverage
- address corresponding updates to the docs
- help us fix wherever we failed to do the above \ud83d\ude1c
"},{"location":"authorino/docs/contributing/#run-the-tests","title":"Run the tests","text":"To run the tests:
make test\n
"},{"location":"authorino/docs/contributing/#try-locally","title":"Try locally","text":""},{"location":"authorino/docs/contributing/#build-deploy-and-try-authorino-in-a-local-cluster","title":"Build, deploy and try Authorino in a local cluster","text":"The following command will:
- Start a local Kubernetes cluster (using Kind)
- Install cert-manager in the cluster
- Install the Authorino Operator and Authorino CRDs
- Build an image of Authorino based on the current branch
- Push the freshly built image to the cluster's registry
- Generate TLS certificates for the Authorino service
- Deploy an instance of Authorino
- Deploy the example application Talker API, a simple HTTP API that echoes back whatever it gets in the request
- Setup Envoy for proxying to the Talker API and using Authorino for external authorization
make local-setup\n
You will be prompted to edit the Authorino
custom resource.
The main workload, composed of the Authorino instance and user apps (Envoy, Talker API), will be deployed to the default
Kubernetes namespace.
Once the deployment is ready, you can forward requests on port 8000 to the Envoy service:
kubectl port-forward deployment/envoy 8000:8000 &\n
Pro tips - Change the default workload namespace by supplying the
NAMESPACE
argument to your make local-setup
and other deployment, apps and local cluster related targets. If the namespace does not exist, it will be created. - Switch to TLS disabled by default when deploying locally by supplying
TLS_ENABLED=0
to your make local-setup
and make deploy
commands. E.g. make local-setup TLS_ENABLED=0
. - Skip being prompted to edit the
Authorino
CR and default to an Authorino deployment with TLS enabled, debug/development log level/mode, and standard name 'authorino', by supplying FF=1
to your make local-setup
and make deploy
commands. E.g. make local-setup FF=1
- Supply
DEPLOY_IDPS=1
to make local-setup
and make user-apps
to deploy Keycloak and Dex to the cluster. DEPLOY_KEYCLOAK
and DEPLOY_DEX
are also available. Read more about additional tools for specific use cases in the section below. - Saving the ID of the process (PID) of the port-forward command spawned in the background can be useful to later kill and restart the process. E.g.
kubectl port-forward deployment/envoy 8000:8000 &;PID=$!
; then kill $PID
.
"},{"location":"authorino/docs/contributing/#additional-tools-for-specific-use-cases","title":"Additional tools (for specific use-cases)","text":"Limitador To deploy Limitador \u2013 pre-configured in Envoy for rate-limiting the Talker API to 5 hits per minute per user_id
when available in the cluster workload \u2013, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
Keycloak Authorino examples include a bundle of Keycloak preloaded with the following realm setup:
- Admin console: http://localhost:8080/admin (admin/p)
- Preloaded realm: kuadrant
- Preloaded clients:
- demo: to which API consumers delegate access and therefore the one which access tokens are issued to
- authorino: used by Authorino to fetch additional user info with
client_credentials
grant type - talker-api: used by Authorino to fetch UMA-protected resource data associated with the Talker API
- Preloaded resources:
/hello
/greetings/1
(owned by user john) /greetings/2
(owned by user jane) /goodbye
- Realm roles:
- member (default to all users)
- admin
- Preloaded users:
- john/p (member)
- jane/p (admin)
- peter/p (member, email not verified)
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Forward local requests to the instance of Keycloak running in the cluster:
kubectl port-forward deployment/keycloak 8080:8080 &\n
Dex Authorino examples include a bundle of Dex preloaded with the following setup:
- Preloaded clients:
- demo: to which API consumers delegate access and therefore the one which access tokens are issued to (Client secret: aaf88e0e-d41d-4325-a068-57c4b0d61d8e)
- Preloaded users:
- marta@localhost/password
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/dex/dex-deploy.yaml\n
Forward local requests to the instance of Dex running in the cluster:
kubectl port-forward deployment/dex 5556:5556 &\n
a12n-server Authorino examples include a bundle of a12n-server and corresponding MySQL database, preloaded with the following setup:
- Admin console: http://a12n-server:8531 (admin/123456)
- Preloaded clients:
- service-account-1: to obtain access tokens via
client_credentials
OAuth2 grant type, to consume the Talker API (Client secret: DbgXROi3uhWYCxNUq_U1ZXjGfLHOIM8X3C2bJLpeEdE); includes metadata privilege: { \"talker-api\": [\"read\"] }
that can be used to write authorization policies - talker-api: to authenticate to the token introspect endpoint (Client secret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g)
To deploy, run:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n
Forward local requests to the instance of a12n-server running in the cluster:
kubectl port-forward deployment/a12n-server 8531:8531 &\n
"},{"location":"authorino/docs/contributing/#re-build-and-rollout-latest","title":"Re-build and rollout latest","text":"Re-build and rollout latest Authorino image:
make local-rollout\n
If you made changes to the CRD between iterations, re-install by running:
make install\n
"},{"location":"authorino/docs/contributing/#clean-up","title":"Clean-up","text":"The following command deletes the entire Kubernetes cluster started with Kind:
make local-cleanup\n
"},{"location":"authorino/docs/contributing/#sign-your-commits","title":"Sign your commits","text":"All commits to be accepted to Authorino's code are required to be signed. Refer to this page about signing your commits.
"},{"location":"authorino/docs/contributing/#logging-policy","title":"Logging policy","text":"A few guidelines for adding logging messages in your code:
- Make sure you understand Authorino's Logging architecture and policy regarding log levels, log modes, tracing IDs, etc.
- Respect controller-runtime's Logging Guidelines.
- Do not add sensitive data to your
info
log messages; instead, redact all sensitive data in your log messages or use debug
log level by mutating the logger with V(1)
before outputting the message.
"},{"location":"authorino/docs/contributing/#additional-resources","title":"Additional resources","text":"Here in the repo:
- Getting started
- Terminology
- Architecture
- Feature description
Other repos:
- Authorino Operator
- Authorino examples
"},{"location":"authorino/docs/contributing/#reach-out","title":"Reach out","text":"#kuadrant channel on kubernetes.slack.com.
"},{"location":"authorino/docs/features/","title":"Features","text":""},{"location":"authorino/docs/features/#overview","title":"Overview","text":"We call features of Authorino the different things one can do to enforce identity verification & authentication and authorization on requests to protected services. These can be a specific identity verification method based on a supported authentication protocol, or a method to fetch additional auth metadata in request-time, etc.
Most features of Authorino relate to the different phases of the Auth Pipeline and therefore are configured in the Authorino AuthConfig
. An identity verification/authentication feature usually refers to a functionality of Authorino such as the API key-based authentication, the validation of JWTs/OIDC ID tokens, and authentication based on Kubernetes TokenReviews. Analogously, OPA, pattern-matching and Kubernetes SubjectAccessReview are examples of authorization features of Authorino.
At a deeper level, a feature can also be an additional functionality within a bigger feature, usually applicable to the whole class the bigger feature belongs to. For instance, the configuration of how auth credentials expected to be carried in the request, which is broadly available for any identity verification method. Other examples are: Identity extension and Priorities.
A full specification of all features of Authorino that can be configured in an AuthConfig
can be found in the official spec of the custom resource definition.
You can also learn about Authorino features by using the kubectl explain
command in a Kubernetes cluster where the Authorino CRD has been installed. E.g. kubectl explain authconfigs.spec.authentication.credentials
.
"},{"location":"authorino/docs/features/#common-feature-json-paths-selector","title":"Common feature: JSON paths (selector
)","text":"The first feature of Authorino to learn about is a common functionality used in the specification of many other features. JSON paths are selectors of data from the Authorization JSON used in parts of an AuthConfig for referring to dynamic values of each authorization request.
Usage examples of JSON paths are: dynamic URLs and request parameters when fetching metadata from external sources, dynamic authorization policy rules, and dynamic authorization response attributes (e.g. injected HTTP headers, Festival Wristband token claims, etc).
"},{"location":"authorino/docs/features/#syntax","title":"Syntax","text":"The syntax to fetch data from the Authorization JSON with JSON paths is based on GJSON. Refer to GJSON Path Syntax page for more information.
"},{"location":"authorino/docs/features/#string-modifiers","title":"String modifiers","text":"On top of GJSON, Authorino defines a few string modifiers.
Examples below provided for the following Authorization JSON:
{\n \"context\": {\n \"request\": {\n \"http\": {\n \"path\": \"/pets/123\",\n \"headers\": {\n \"authorization\": \"Basic amFuZTpzZWNyZXQK\" // jane:secret\n \"baggage\": \"eyJrZXkxIjoidmFsdWUxIn0=\" // {\"key1\":\"value1\"}\n }\n }\n }\n },\n \"auth\": {\n \"identity\": {\n \"username\": \"jane\",\n \"fullname\": \"Jane Smith\",\n \"email\": \"\\u0006jane\\u0012@petcorp.com\\n\"\n },\n },\n}\n
@strip
Strips out any non-printable characters such as carriage return. E.g. auth.identity.email.@strip
\u2192 \"jane@petcorp.com\"
.
@case:upper|lower
Changes the case of a string. E.g. auth.identity.username.@case:upper
\u2192 \"JANE\"
.
@replace:{\"old\":string,\"new\":string}
Replaces a substring within a string. E.g. auth.identity.username.@replace:{\"old\":\"Smith\",\"new\":\"Doe\"}
\u2192 \"Jane Doe\"
.
@extract:{\"sep\":string,\"pos\":int}
Splits a string at occurrences of a separator (default: \" \"
) and selects the substring at the pos
-th position (default: 0
). E.g. context.request.path.@extract:{\"sep\":\"/\",\"pos\":2}
\u2192 123
.
@base64:encode|decode
base64-encodes or decodes a string value. E.g. auth.identity.username.decoded.@base64:encode
\u2192 \"amFuZQo=\"
.
In combination with @extract
, @base64
can be used to extract the username in an HTTP Basic Authentication request. E.g. context.request.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\",\"pos\":1}
\u2192 \"jane\"
.
"},{"location":"authorino/docs/features/#interpolation","title":"Interpolation","text":"JSON paths can be interpolated into strings to build template-like dynamic values. E.g. \"Hello, {auth.identity.name}!\"
.
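For illustration, a sketch of a dynamic response header whose value interpolates a JSON path \u2013 the header name is hypothetical, and the example assumes a plain text success response item whose selector is treated as a template:
spec:\n  response:\n    success:\n      headers:\n        \"x-greeting\": # hypothetical header injected post-authorization\n          plain:\n            selector: \"Hello, {auth.identity.name}!\" # interpolated at request-time\n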
"},{"location":"authorino/docs/features/#identity-verification-authentication-features-authentication","title":"Identity verification & authentication features (authentication
)","text":""},{"location":"authorino/docs/features/#api-key-authenticationapikey","title":"API key (authentication.apiKey
)","text":"Authorino relies on Kubernetes Secret
resources to represent API keys.
To define an API key, create a Secret
in the cluster containing an api_key
entry that holds the value of the API key.
API key secrets must be created in the same namespace of the AuthConfig
(default) or spec.authentication.apiKey.allNamespaces
must be set to true
(only works with cluster-wide Authorino instances).
API key secrets must be labeled with the labels that match the selectors specified in spec.authentication.apiKey.selector
in the AuthConfig
.
Whenever an AuthConfig
is indexed, Authorino will also index all matching API key secrets. In order for Authorino to also watch events related to API key secrets individually (e.g. new Secret
created, updates, deletion/revocation), Secret
s must also include a label that matches Authorino's bootstrap configuration --secret-label-selector
(default: authorino.kuadrant.io/managed-by=authorino
). This label may or may not be present to spec.authentication.apiKey.selector
in the AuthConfig
without implications for the caching of the API keys when triggered by the reconciliation of the AuthConfig
; however, if not present, individual changes related to the API key secret (i.e. without touching the AuthConfig
) will be ignored by the reconciler.
Example. For the following AuthConfig
:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\n namespace: authorino-system\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"api-key-users\":\n apiKey:\n selector:\n matchLabels: # the key-value set used to select the matching `Secret`s; resources including these labels will be accepted as valid API keys to authenticate to this service\n group: friends # some custom label\n allNamespaces: true # only works with cluster-wide Authorino instances; otherwise, create the API key secrets in the same namespace of the AuthConfig\n
The following Kubernetes Secret
represents a valid API key:
apiVersion: v1\nkind: Secret\nmetadata:\n name: user-1-api-key-1\n namespace: default\n labels:\n authorino.kuadrant.io/managed-by: authorino # so the Authorino controller reconciles events related to this secret\n group: friends\nstringData:\n api_key: <some-randomly-generated-api-key-value>\ntype: Opaque\n
The resolved identity object, added to the authorization JSON following an API key identity source evaluation, is the Kubernetes Secret
resource (as JSON).
"},{"location":"authorino/docs/features/#kubernetes-tokenreview-authenticationkubernetestokenreview","title":"Kubernetes TokenReview (authentication.kubernetesTokenReview
)","text":"Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).
These tokens can be either ServiceAccount
tokens such as the ones issued by kubelet as part of Kubernetes Service Account Token Volume Projection, or any valid user access tokens issued to users of the Kubernetes server API.
The list of audiences
of the token must include the requested host and port of the protected API (default), or all audiences specified in the Authorino AuthConfig
custom resource. For example:
For the following AuthConfig
CR, the Kubernetes token must include the audience my-api.io
:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"cluster-users\":\n kubernetesTokenReview: {}\n
Whereas for the following AuthConfig
CR, the Kubernetes token audiences must include foo and bar:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"cluster-users\":\n kubernetesTokenReview:\n audiences:\n - foo\n - bar\n
The resolved identity object added to the authorization JSON following a successful Kubernetes authentication identity evaluation is the status
field of TokenReview response (see TokenReviewStatus for reference).
"},{"location":"authorino/docs/features/#jwt-verification-authenticationjwt","title":"JWT verification (authentication.jwt
)","text":"In reconciliation-time, using OpenID Connect Discovery well-known endpoint, Authorino automatically discovers and caches OpenID Connect configurations and associated JSON Web Key Sets (JWKS) for all OpenID Connect issuers declared in an AuthConfig
. Then, in request-time, Authorino verifies the JSON Web Signature (JWS) and check the time validity of signed JSON Web Tokens (JWT) supplied on each request.
Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.
The kid
claim stated in the JWT header must match one of the keys cached by Authorino during OpenID Connect Discovery, therefore supporting JWK rotation.
The decoded payload of the validated JWT is appended to the authorization JSON as the resolved identity.
OpenID Connect configurations and linked JSON Web Key Sets can be configured to be automatically refreshed (pull again from the OpenID Connect Discovery well-known endpoints), by setting the authentication.jwt.ttl
field (given in seconds, default: 0
\u2013 i.e. auto-refresh disabled).
For an excellent summary of the underlying concepts and standards that relate OpenID Connect and JSON Object Signing and Encryption (JOSE), see this article by Jan Rusnacko. For official specification and RFCs, see OpenID Connect Core, OpenID Connect Discovery, JSON Web Token (JWT) (RFC7519), and JSON Object Signing and Encryption (JOSE).
"},{"location":"authorino/docs/features/#oauth-20-introspection-authenticationoauth2introspection","title":"OAuth 2.0 introspection (authentication.oauth2Introspection
)","text":"For bare OAuth 2.0 implementations, Authorino can perform token introspection on the access tokens supplied in the requests to protected APIs.
Authorino does not implement any of OAuth 2.0 grants for the applications to obtain the token. However, it can verify supplied tokens with the OAuth server, including opaque tokens, as long as the server exposes the token_introspect
endpoint (RFC 7662).
Developers must set the token introspection endpoint in the AuthConfig
, as well as a reference to the Kubernetes secret storing the credentials of the OAuth client to be used by Authorino when requesting the introspect.
The response returned by the OAuth2 server to the token introspection request is the resolved identity appended to the authorization JSON.
"},{"location":"authorino/docs/features/#x509-client-certificate-authentication-authenticationx509","title":"X.509 client certificate authentication (authentication.x509
)","text":"Authorino can verify X.509 certificates presented by clients for authentication on the request to the protected APIs, at application level.
Trusted root Certificate Authorities (CA) are stored in Kubernetes Secrets labeled according to selectors specified in the AuthConfig, watched and indexed by Authorino. Make sure to create proper kubernetes.io/tls
-typed Kubernetes Secrets, containing the public certificates of the CA stored in either a tls.crt
or ca.crt
entry inside the secret.
Trusted root CA secrets must be created in the same namespace of the AuthConfig
(default) or spec.authentication.x509.allNamespaces
must be set to true
(only works with cluster-wide Authorino instances).
Client certificates must include the X.509 v3 extension specifying the 'Client Authentication' extended key usage.
The identity object resolved out of a client X.509 certificate equals the subject field of the certificate, and usually serializes as JSON within the Authorization JSON as follows:
{\n \"auth\": {\n \"identity\": {\n \"CommonName\": \"aisha\",\n \"Country\": [\"PK\"],\n \"ExtraNames\": null,\n \"Locality\": [\"Islamabad\"],\n \"Names\": [\n { \"Type\": [2, 5, 4, 3], \"Value\": \"aisha\" },\n { \"Type\": [2, 5, 4, 6], \"Value\": \"PK\" },\n { \"Type\": [2, 5, 4, 7], \"Value\": \"Islamabad\" },\n { \"Type\": [2, 5, 4,10], \"Value\": \"ACME Inc.\" },\n { \"Type\": [2, 5, 4,11], \"Value\": \"Engineering\" }\n ],\n \"Organization\": [\"ACME Inc.\"],\n \"OrganizationalUnit\": [\"Engineering\"],\n \"PostalCode\": null,\n \"Province\": null,\n \"SerialNumber\": \"\",\n \"StreetAddress\": null\n }\n }\n}\n
"},{"location":"authorino/docs/features/#plain-authenticationplain","title":"Plain (authentication.plain
)","text":"Authorino can read plain identity objects, based on authentication tokens provided and verified beforehand using other means (e.g. Envoy JWT Authentication filter, Kubernetes API server authentication), and injected into the payload to the external authorization service.
The plain identity object is retrieved from the Authorization JSON based on a JSON path specified in the AuthConfig
.
This feature is particularly useful in cases where authentication/identity verification is handled before invoking the authorization service and its resolved value injected in the payload can be trusted. Examples of applications for this feature include:
- Authentication handled in Envoy leveraging the Envoy JWT Authentication filter (decoded JWT injected as 'metadata_context')
- Use of Authorino as Kubernetes ValidatingWebhook service (Kubernetes 'userInfo' injected in the body of the
AdmissionReview
request)
Example of AuthConfig
to retrieve plain identity object from the Authorization JSON.
spec:\n authentication:\n \"pre-validated-jwt\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n
If the specified JSON path does not exist in the Authorization JSON or the value is null
, the identity verification will fail and, unless another identity config succeeds, Authorino will halt the Auth Pipeline with the usual 401 Unauthorized
.
"},{"location":"authorino/docs/features/#anonymous-access-authenticationanonymous","title":"Anonymous access (authentication.anonymous
)","text":"Literally a no-op evaluator for the identity verification phase that returns a static identity object {\"anonymous\":true}
.
It allows implementing AuthConfigs
that bypass the identity verification phase of Authorino, e.g. to:
- enable anonymous access to protected services (always or combined with Priorities)
- postpone authentication in the Auth Pipeline to be resolved as part of an OPA policy
Example of AuthConfig
spec that falls back to anonymous access when OIDC authentication fails, enforcing read-only access to the protected service in such cases:
spec:\n authentication:\n \"jwt\":\n jwt:\n issuerUrl: \"\u2026\"\n \"anonymous\":\n priority: 1 # expired oidc token, missing creds, etc. default to anonymous access\n anonymous: {}\n authorization:\n \"read-only-access-if-authn-fails\":\n when:\n\n - selector: auth.identity.anonymous\n operator: eq\n value: \"true\"\n patternMatching:\n patterns:\n - selector: context.request.http.method\n operator: eq\n value: GET\n
"},{"location":"authorino/docs/features/#festival-wristband-authentication","title":"Festival Wristband authentication","text":"Authorino-issued Festival Wristband tokens can be validated as any other signed JWT using Authorino's JWT verification.
The value of the issuer must be the same issuer specified in the custom resource for the protected API that originally issued the wristband. Eventually, this can be the same custom resource where the wristband is configured as a valid source of identity, but not necessarily.
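E.g., a wristband issued under the response config exemplified later in this page could be accepted as a source of identity with a sketch like the following (names are illustrative):
spec:\n authentication:\n \"wristband\":\n jwt:\n issuerUrl: https://authorino-oidc.default.svc:8083/my-namespace/my-api-protection/x-wristband # must equal the issuer set in the wristband response config\n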
"},{"location":"authorino/docs/features/#extra-auth-credentials-authenticationcredentials","title":"Extra: Auth credentials (authentication.credentials
)","text":"All the identity verification methods supported by Authorino can be configured regarding the location where access tokens and credentials (i.e. authentication secrets) fly within the request.
By default, authentication secrets are expected to be supplied in the Authorization
HTTP header, with the default Bearer
prefix and the plain authentication secret separated by a space.
The full list of supported options is exemplified below:
spec:\n authentication:\n \"creds-in-the-authz-header\":\n credentials:\n authorizationHeader:\n prefix: JWT\n\n \"creds-in-a-custom-header\":\n credentials:\n customHeader:\n name: X-MY-CUSTOM-HEADER\n prefix: \"\"\n\n \"creds-in-a-query-param\":\n queryString:\n name: my_param\n\n \"creds-in-a-cookie-entry\":\n cookie:\n name: cookie-key\n
"},{"location":"authorino/docs/features/#extra-identity-extension-authenticationdefaults-and-authenticationoverrides","title":"Extra: Identity extension (authentication.defaults
and authentication.overrides
)","text":"Resolved identity objects can be extended with user-defined JSON properties. Values can be static or fetched from the Authorization JSON.
A typical use-case for this feature is token normalization. Say you have more than one identity source listed in your AuthConfig
but each source issues an access token with a different JSON structure \u2013 e.g., two OIDC issuers that use different names for custom JWT claims of similar meaning, or a combination of two different identity verification/authentication methods, such as API keys (whose identity objects are the corresponding Kubernetes Secret
s) and Kubernetes tokens (whose identity objects are Kubernetes UserInfo data).
In such cases, identity extension can be used to normalize the token to always include the same set of JSON properties of interest, regardless of the source of identity that issued the original token verified by Authorino. This simplifies the writing of authorization policies and configuration of dynamic responses.
In case of extending an existing property of the identity object (replacing), the API allows controlling whether or not to overwrite the value. This is particularly useful for normalizing tokens of the same identity source that may nonetheless occasionally differ in structure, such as JWT claims that are sometimes absent but can be safely replaced with another (e.g. username
or sub
).
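A hedged example of token normalization with identity extension, assuming two hypothetical OIDC issuers that name the username claim differently:
spec:\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: https://keycloak.example.com/realms/demo # hypothetical issuer\n defaults:\n username:\n selector: auth.identity.preferred_username # normalized property copied from the claim\n \"other-idp\":\n jwt:\n issuerUrl: https://other-idp.example.com # hypothetical issuer\n defaults:\n username:\n selector: auth.identity.sub\n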
"},{"location":"authorino/docs/features/#external-auth-metadata-features-metadata","title":"External auth metadata features (metadata
)","text":""},{"location":"authorino/docs/features/#http-getget-by-post-metadatahttp","title":"HTTP GET/GET-by-POST (metadata.http
)","text":"Generic HTTP adapter that sends a request to an external service. It can be used to fetch external metadata for the authorization policies (phase ii of the Authorino Auth Pipeline), or as a web hook.
The adapter can issue requests using either the GET or POST method; in both cases, the URL and parameters are defined by the user in the spec. Dynamic values fetched from the Authorization JSON can be used.
POST request parameters as well as the encoding of the content can be controlled using the bodyParameters
and contentType
fields of the config, respectively. The Content-Type of POST requests can be either application/x-www-form-urlencoded
(default) or application/json
.
Authentication of Authorino with the external metadata server can be set either via a long-lived shared secret stored in a Kubernetes Secret or via the OAuth2 client credentials grant. For a long-lived shared secret, set the sharedSecretRef
field. For OAuth2 client credentials grant, use the oauth2
option.
In both cases, the location where the secret (long-lived or OAuth2 access token) travels in the request performed to the external HTTP service can be specified in the credentials
field. By default, the authentication secret is supplied in the Authorization
header with the Bearer
prefix.
Custom headers can be set with the headers
field. Nevertheless, headers such as Content-Type
and Authorization
(or eventual custom header used for carrying the authentication secret, set instead via the credentials
option) will be superseded by the respective values defined for the fields contentType
and sharedSecretRef
.
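Putting these options together, a sketch of a GET-by-POST metadata config \u2013 the URL and the Secret name are hypothetical:
spec:\n metadata:\n \"user-profile\":\n http:\n url: https://my-metadata-service.example.com/profiles # hypothetical external metadata service\n method: POST\n contentType: application/json\n bodyParameters:\n \"userId\":\n selector: auth.identity.sub # dynamic value fetched from the Authorization JSON\n sharedSecretRef: # long-lived shared secret used to authenticate with the service\n name: metadata-service-credentials\n key: shared-secret\n credentials:\n authorizationHeader:\n prefix: Bearer\n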
"},{"location":"authorino/docs/features/#oidc-userinfo-metadatauserinfo","title":"OIDC UserInfo (metadata.userInfo
)","text":"Online fetching of OpenID Connect (OIDC) UserInfo data (phase ii of the Authorino Auth Pipeline), associated with an OIDC identity source configured and resolved in phase (i).
Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remotely checking the state of the session, as opposed to only verifying the JWT/JWS offline.
Implementation requires a JWT verification authentication config (spec.authentication.jwt
) in the same AuthConfig
, so the well-known configuration of the OpenID Connect (OIDC) issuer can be reused.
The response returned by the OIDC server to the UserInfo request is appended (as JSON) to auth.metadata
in the authorization JSON.
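A minimal sketch, assuming a JWT authentication config named \"keycloak\" declared in the same AuthConfig:
spec:\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: https://keycloak.example.com/realms/demo # hypothetical issuer\n metadata:\n \"userinfo\":\n userInfo:\n identitySource: keycloak # reuses the well-known configuration of the JWT authentication config above\n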
"},{"location":"authorino/docs/features/#user-managed-access-uma-resource-registry-metadatauma","title":"User-Managed Access (UMA) resource registry (metadata.uma
)","text":"User-Managed Access (UMA) is an OAuth-based protocol for resource owners to allow other users to access their resources. Since the UMA-compliant server is expected to know about the resources, Authorino includes a client that fetches resource data from the server and adds that as metadata of the authorization payload.
This enables the implementation of resource-level Attribute-Based Access Control (ABAC) policies. Attributes of the resource fetched in a UMA flow can be, e.g., the owner of the resource, or any business-level attributes stored in the UMA-compliant server.
A UMA-compliant server is an external authorization server (e.g., Keycloak) where the protected resources are registered. It can as well be the upstream API itself, as long as it implements the UMA protocol, with initial authentication by the client_credentials
grant to exchange for a Protected API Token (PAT).
It's important to note that Authorino does NOT manage resources in the UMA-compliant server. As shown in the flow above, Authorino's UMA client is only used to fetch data about the requested resources. Authorino exchanges client credentials for a Protected API Token (PAT), then queries for resources whose URIs match the path of the HTTP request (as passed to Authorino by the Envoy proxy) and fetches the data of each matching resource.
The resource data is added as metadata of the authorization payload and passed as input to the configured authorization policies. All resources returned by the UMA-compliant server in the query by URI are passed along. They are available in the PDPs (authorization payload) as input.auth.metadata.custom-name => Array
. (See The \"Auth Pipeline\" for details.)
"},{"location":"authorino/docs/features/#authorization-features-authorization","title":"Authorization features (authorization
)","text":""},{"location":"authorino/docs/features/#pattern-matching-authorization-authorizationpatternmatching","title":"Pattern-matching authorization (authorization.patternMatching
)","text":"Grant/deny access based on simple pattern-matching expressions (\"patterns\") compared against values selected from the Authorization JSON.
Each expression is a tuple composed of:
- a
selector
, to fetch from the Authorization JSON \u2013 see Common feature: JSON paths for details about syntax; - an
operator
\u2013 eq
(equals), neq
(not equal); incl
(includes) and excl
(excludes), for arrays; and matches
, for regular expressions; - a fixed comparable
value
Rules can mix and combine literal expressions and references to expression sets (\"named patterns\") defined at the upper level of the AuthConfig
spec. (See Common feature: Conditions)
spec:\n authorization:\n \"my-simple-json-pattern-matching-policy\":\n patternMatching:\n patterns: # All patterns must match for access to be granted\n\n - selector: auth.identity.email_verified\n operator: eq\n value: \"true\"\n - patternRef: admin\n\n patterns:\n admin: # a named pattern that can be reused in other sets of rules or conditions\n\n - selector: auth.identity.roles\n operator: incl\n value: admin\n
"},{"location":"authorino/docs/features/#open-policy-agent-opa-rego-policies-authorizationopa","title":"Open Policy Agent (OPA) Rego policies (authorization.opa
)","text":"You can model authorization policies in Rego language and add them as part of the protection of your APIs.
Policies can be either declared in-line in Rego language (rego
) or as an HTTP endpoint where Authorino will fetch the source code of the policy in reconciliation-time (externalPolicy
).
Policies pulled from external registries can be configured to be automatically refreshed (pulled again from the external registry), by setting the authorization.opa.externalPolicy.ttl
field (given in seconds, default: 0
\u2013 i.e. auto-refresh disabled).
Authorino's built-in OPA module precompiles the policies during reconciliation of the AuthConfig and caches the precompiled policies for fast evaluation in runtime, where they receive the Authorization JSON as input.
An optional field allValues: boolean
causes the values of all rules declared in the Rego document to be returned in the OPA output after policy evaluation. When disabled (default), only the boolean value allow
is returned. Values of internal rules of the Rego document can be referenced in subsequent policies/phases of the Auth Pipeline.
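For illustration, a sketch of an OPA policy pulled from a hypothetical external registry, with auto-refresh and allValues enabled:
spec:\n authorization:\n \"my-rego-policy\":\n opa:\n allValues: true # return the values of all rules of the Rego document in the OPA output\n externalPolicy:\n url: http://my-policy-registry.example.com/policies/my-api.rego # hypothetical policy registry\n ttl: 300 # pull the policy source again every 5 minutes\n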
"},{"location":"authorino/docs/features/#kubernetes-subjectaccessreview-authorizationkubernetessubjectaccessreview","title":"Kubernetes SubjectAccessReview (authorization.kubernetesSubjectAccessReview
)","text":"Access control enforcement based on rules defined in the Kubernetes authorization system, i.e. Role
, ClusterRole
, RoleBinding
and ClusterRoleBinding
resources of Kubernetes RBAC.
Authorino issues a SubjectAccessReview (SAR) inquiry that checks with the underlying Kubernetes server whether the user can access a particular resource, resource kind or generic URL.
It supports resource attributes authorization check (parameters defined in the AuthConfig
) and non-resource attributes authorization check (HTTP endpoint inferred from the original request).
- Resource attributes: adequate for permissions set at namespace level, defined in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb)
- Non-resource attributes: adequate for permissions set at cluster scope, defined for protected endpoints of a generic HTTP API (URL path + verb)
Example of Kubernetes role for resource attributes authorization:
apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: pet-reader\nrules:\n\n- apiGroups: [\"pets.io\"]\n resources: [\"pets\"]\n verbs: [\"get\"]\n
Example of Kubernetes cluster role for non-resource attributes authorization:
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: pet-editor\nrules:\n\n- nonResourceURLs: [\"/pets/*\"]\n verbs: [\"put\", \"delete\"]\n
Kubernetes' authorization policy configs look like the following in an Authorino AuthConfig
:
authorization:\n \"kubernetes-rbac\":\n kubernetesSubjectAccessReview:\n user: # values of the parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n selector: auth.identity.metadata.annotations.userid\n\n groups: [] # user groups to test for.\n\n # for resource attributes permission checks; omit it to perform a non-resource attributes SubjectAccessReview with path and method/verb assumed from the original request\n # if included, use the resource attributes, where the values for each parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n resourceAttributes:\n namespace:\n value: default\n group:\n value: pets.io # the api group of the protected resource to be checked for permissions for the user\n resource:\n value: pets # the resource kind\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2} # resource name \u2013 e.g., the {id} in `/pets/{id}`\n verb:\n selector: context.request.http.method.@case:lower # api operation \u2013 e.g., copying from the context to use the same http method of the request\n
user
and properties of resourceAttributes
can be defined from fixed values or patterns of the Authorization JSON.
An array of groups
(optional) can be set as well. When defined, it will be used in the SubjectAccessReview
request.
"},{"location":"authorino/docs/features/#spicedb-authorizationspicedb","title":"SpiceDB (authorization.spicedb
)","text":"Check permission requests via gRPC with an external Google Zanzibar-inspired SpiceDB server, by Authzed.
Subject, resource and permission parameters can be set to static values or read from the Authorization JSON.
spec:\n authorization:\n \"spicedb\":\n spicedb:\n endpoint: spicedb:50051\n insecure: true # disables TLS\n sharedSecretRef:\n name: spicedb\n key: token\n subject:\n kind:\n value: blog/user\n name:\n selector: auth.identity.sub\n resource:\n kind:\n value: blog/post\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2} # /posts/{id}\n permission:\n selector: context.request.http.method\n
"},{"location":"authorino/docs/features/#custom-response-features-response","title":"Custom response features (response
)","text":""},{"location":"authorino/docs/features/#custom-response-forms-successful-authorization-vs-custom-denial-status","title":"Custom response forms: successful authorization vs custom denial status","text":"The response to the external authorization request can be customized in the following fashion:
- Successful authorization (
response.success
) - Added HTTP headers (
response.success.headers
) - Envoy Dynamic Metadata (
response.success.dynamicMetadata
) - Custom denial status
- Unauthenticated (
response.unauthenticated
) - Unauthorized (
response.unauthorized
)
Successful authorization custom responses can be set based on any of the supported custom response methods:
- Plain text value
- JSON injection
- Festival Wristband Tokens
"},{"location":"authorino/docs/features/#added-http-headers","title":"Added HTTP headers","text":"Set custom responses as HTTP headers injected in the request post-successful authorization by specifying one of the supported methods under response.success.headers
.
The name of the response config (default) or the value of the key
option (if provided) will be used as the name of the header.
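E.g., a sketch where the key option overrides the header name (values are illustrative):
response:\n success:\n headers:\n \"user-data\":\n key: X-User-Data # the header will be named X-User-Data instead of user-data\n plain:\n selector: auth.identity.username\n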
"},{"location":"authorino/docs/features/#envoy-dynamic-metadata","title":"Envoy Dynamic Metadata","text":"Authorino custom response methods can also be used to propagate Envoy Dynamic Metadata. To do so, set one of the supported methods under response.success.dynamicMetadata
.
The name of the response config (default) or the value of the key
option (if provided) will be used as the name of the root property of the dynamic metadata content.
A custom response exported as Envoy Dynamic Metadata can be set in the Envoy route or virtual host configuration as input to a consecutive filter in the filter chain.
E.g., to read metadata emitted by the authorization service with scheme { \"auth-data\": { \"api-key-ns\": string, \"api-key-name\": string } }
, as input in a rate limit configuration placed in the filter chain after the external authorization, the Envoy config may look like the following:
# Envoy config snippet to inject `user_namespace` and `username` rate limit descriptors from metadata emitted by Authorino\nrate_limits:\n\n- actions:\n - metadata:\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n - key: api-key-ns\n descriptor_key: user_namespace\n - metadata:\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n - key: api-key-name\n descriptor_key: username\n
"},{"location":"authorino/docs/features/#custom-denial-status-responseunauthenticated-and-responseunauthorized","title":"Custom denial status (response.unauthenticated
and response.unauthorized
)","text":"By default, Authorino will inform Envoy to respond with 401 Unauthorized
or 403 Forbidden
respectively when the identity verification (phase i of the Auth Pipeline) or authorization (phase ii) fail. These can be customized by specifying spec.response.unauthenticated
and spec.response.unauthorized
in the AuthConfig
.
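A sketch of a custom denial status that redirects unauthenticated users to a hypothetical login page, leveraging interpolation from the Authorization JSON:
spec:\n response:\n unauthenticated:\n code: 302\n headers:\n \"Location\":\n selector: https://my-app.example.com/login?redirect_to={context.request.http.path} # hypothetical login page\n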
"},{"location":"authorino/docs/features/#custom-response-methods","title":"Custom response methods","text":""},{"location":"authorino/docs/features/#plain-text-responsesuccessheadersdynamicmetadataplain","title":"Plain text (response.success.<headers|dynamicMetadata>.plain
)","text":"Simpler, yet more generalized form, for extending the authorization response for header mutation and Envoy Dynamic Metadata, based on plain text values.
The value can be static:
response:\n success:\n headers:\n \"x-auth-service\":\n plain:\n value: Authorino\n
or fetched dynamically from the Authorization JSON (which includes support for interpolation):
response:\n success:\n headers:\n \"x-username\":\n plain:\n selector: auth.identity.username\n
"},{"location":"authorino/docs/features/#json-injection-responsesuccessheadersdynamicmetadatajson","title":"JSON injection (response.success.<headers|dynamicMetadata>.json
)","text":"User-defined dynamic JSON objects generated by Authorino in the response phase, from static or dynamic data of the auth pipeline, and passed back to the external authorization client within added HTTP headers or Dynamic Metadata.
The following Authorino AuthConfig
custom resource is an example that defines 3 dynamic JSON response items, where two items are returned to the client, stringified, in added HTTP headers, and the third as Envoy Dynamic Metadata. Envoy proxy can be configured to propagate the dynamic metadata emitted by Authorino into another filter \u2013 e.g. the rate limit filter.
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n namespace: my-namespace\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"edge\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-my-custom-header\":\n json:\n properties:\n \"prop1\":\n value: value1\n \"prop2\":\n selector: some.path.within.auth.json\n \"x-ext-auth-other-json\":\n json:\n properties:\n \"propX\":\n value: valueX\n\n dynamicMetadata:\n \"auth-data\":\n json:\n properties:\n \"api-key-ns\":\n selector: auth.identity.metadata.namespace\n \"api-key-name\":\n selector: auth.identity.metadata.name\n
"},{"location":"authorino/docs/features/#festival-wristband-tokens-responsesuccessheadersdynamicmetadatawristband","title":"Festival Wristband tokens (response.success.<headers|dynamicMetadata>.wristband
)","text":"Festival Wristbands are signed OpenID Connect JSON Web Tokens (JWTs) issued by Authorino at the end of the auth pipeline and passed back to the client, typically in added HTTP response header. It is an opt-in feature that can be used to implement Edge Authentication Architecture (EAA) and enable token normalization. Authorino wristbands include minimal standard JWT claims such as iss
, iat
, and exp
, and optional user-defined custom claims, whose values can be static or dynamically fetched from the authorization JSON.
The Authorino AuthConfig
custom resource below sets an API protection that issues a wristband after a successful authentication via API key. Apart from standard JWT claims, the wristband contains 2 custom claims: a static value aud=internal
and a dynamic value born
that fetches from the authorization JSON the date/time of creation of the secret that represents the API key used to authenticate.
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n namespace: my-namespace\n name: my-api-protection\nspec:\n hosts:\n\n - my-api.io\n authentication:\n \"edge\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-wristband\":\n wristband:\n issuer: https://authorino-oidc.default.svc:8083/my-namespace/my-api-protection/x-wristband\n customClaims:\n \"aud\":\n value: internal\n \"born\":\n selector: auth.identity.metadata.creationTimestamp\n tokenDuration: 300\n signingKeyRefs:\n - name: my-signing-key\n algorithm: ES256\n - name: my-old-signing-key\n algorithm: RS256\n
The signing key names listed in signingKeyRefs
must match the names of Kubernetes Secret
resources created in the same namespace, where each secret contains a key.pem
entry that holds the value of the private key that will be used to sign the wristbands issued, formatted as PEM. The first key in this list will be used to sign the wristbands, while the others are kept to support key rotation.
For each protected API configured for Festival Wristband issuing, Authorino exposes the following OpenID Connect Discovery well-known endpoints (available for requests within the cluster):
- OpenID Connect configuration: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-configuration
- JSON Web Key Set (JWKS) well-known endpoint: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-connect/certs
"},{"location":"authorino/docs/features/#callbacks-callbacks","title":"Callbacks (callbacks
)","text":""},{"location":"authorino/docs/features/#http-endpoints-callbackshttp","title":"HTTP endpoints (callbacks.http
)","text":"Sends requests to specified HTTP endpoints at the end of the auth pipeline.
The scheme of the http
field is the same as of metadata.http
.
Example:
spec:\n authentication: [\u2026]\n authorization: [\u2026]\n\n callbacks:\n \"log\":\n http:\n url: http://logsys\n method: POST\n body:\n selector: |\n \\{\"requestId\":context.request.http.id,\"username\":\"{auth.identity.username}\",\"authorizationResult\":{auth.authorization}\\}\n \"important-forbidden\":\n when:\n\n - selector: auth.authorization.important-policy\n operator: eq\n value: \"false\"\n http:\n url: \"http://monitoring/important?forbidden-user={auth.identity.username}\"\n
"},{"location":"authorino/docs/features/#common-feature-priorities","title":"Common feature: Priorities","text":"Priorities allow to set sequence of execution for blocks of concurrent evaluators within phases of the Auth Pipeline.
Evaluators of the same priority execute concurrently with each other, \"in a block\". After syncing that block (i.e. after all evaluators of the block have returned), the next block of evaluator configs of consecutive priority is triggered.
Use cases for priorities are:
- Saving expensive tasks to be triggered only when there's a high chance of the pipeline returning immediately after a less expensive one finishes \u2013 e.g.
- an identity config that calls an external IdP to verify a token that is rarely used, compared to verifying JWTs preferred by most users of the service;
- an authorization policy that performs some quick checks first, such as verifying allowed paths, and only if it passes, moves to the evaluation of a more expensive policy.
- Establishing dependencies between evaluators - e.g.
- an external metadata request that needs to wait until a previous metadata responds first (in order to use data from the response)
Priorities can be set using the priority
property available in all evaluator configs of all phases of the Auth Pipeline (identity, metadata, authorization and response). The lower the number, the higher the priority. By default, all evaluators have priority 0 (i.e. highest priority).
Consider the following example to understand how priorities work:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api\n authentication:\n \"tier-1\":\n priority: 0\n apiKey:\n selector:\n matchLabels:\n tier: \"1\"\n \"tier-2\":\n priority: 1\n apiKey:\n selector:\n matchLabels:\n tier: \"2\"\n \"tier-3\":\n priority: 1\n apiKey:\n selector:\n matchLabels:\n tier: \"3\"\n metadata:\n \"first\":\n http:\n url: http://talker-api:3000\n \"second\":\n priority: 1\n http:\n url: http://talker-api:3000/first_uuid={auth.metadata.first.uuid}\n authorization:\n \"allowed-endpoints\":\n when:\n - selector: context.request.http.path\n operator: neq\n value: /hi\n - selector: context.request.http.path\n operator: neq\n value: /hello\n - selector: context.request.http.path\n operator: neq\n value: /aloha\n - selector: context.request.http.path\n operator: neq\n value: /ciao\n patternMatching:\n patterns:\n - selector: deny\n operator: eq\n value: \"true\"\n \"more-expensive-policy\": # no point in evaluating this one if it's not an allowed endpoint\n priority: 1\n opa:\n rego: |\n allow { true }\n response:\n success:\n headers:\n \"x-auth-data\":\n json:\n properties:\n \"tier\":\n selector: auth.identity.metadata.labels.tier\n \"first-uuid\":\n selector: auth.metadata.first.uuid\n \"second-uuid\":\n selector: auth.metadata.second.uuid\n \"second-path\":\n selector: auth.metadata.second.path\n
For the AuthConfig
above,
-
Identity configs tier-2
and tier-3
(priority 1) will only trigger (concurrently) in case tier-1
(priority 0) fails to validate the authentication token first. (This behavior happens without prejudice to context cancellation between concurrent evaluators \u2013 i.e. evaluators triggered concurrently with one another, such as tier-2
and tier-3
, continue to cancel the context of each other if any of them succeeds validating the token first.)
-
Metadata source second
(priority 1) uses the response of the request issued by metadata source first
(priority 0), so it will wait for first
to finish by triggering only in the second block.
-
Authorization policy allowed-endpoints
(priority 0) is considered to be a lot less expensive than more-expensive-policy
(priority 1) and has a high chance of denying access to the protected service (if the path is not one of the allowed endpoints). By setting different priorities for these policies, we ensure the more expensive policy is triggered in sequence after the less expensive one, instead of concurrently.
"},{"location":"authorino/docs/features/#common-feature-conditions-when","title":"Common feature: Conditions (when
)","text":"Conditions, named when
in the AuthConfig API, are logical expressions, composed of patterns and the logical operators AND and OR, that can be used to condition the evaluation of a particular auth rule within an AuthConfig, as well as of the AuthConfig altogether (\"top-level conditions\").
The patterns are evaluated against the Authorization JSON, where each pattern is a tuple composed of:
selector
: a JSON path to fetch a value from the Authorization JSON operator
: one of: eq
(equals); neq
(not equal); incl
(includes) and excl
(excludes), for when the value fetched from the Authorization JSON is expected to be an array; matches
, for regular expressions value
: a static string value to compare the value selected from the Authorization JSON with.
An expression contains one or more patterns and they must either all evaluate to true (\"AND\" operator, declared by grouping the patterns within an all
block) or at least one of the patterns must be true (\"OR\" operator, when grouped within an any
block). Patterns not explicitly grouped are AND'ed by default.
To avoid repetition when listing patterns, any set of literal { selector, operator, value } tuples can be stored at the top-level of the AuthConfig spec, indexed by name, and later referred to within an expression by including a patternRef
tuples can be stored at the top-level of the AuthConfig spec, indexed by name, and later referred within an expression by including a patternRef
in the block of conditions.
Examples of when
conditions
i) to skip an entire AuthConfig
based on the context (AND operator assumed by default):
spec:\n when: # auth enforced only on requests to POST /resources/*\n\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
ii) equivalent to the above with explicit AND operator (i.e., all
block):
spec:\n when: # auth enforced only on requests to POST /resources/*\n\n - all:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
iii) OR condition (i.e., any
block):
spec:\n when: # auth enforced only on requests with HTTP method equals to POST or PUT\n\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n
iv) complex expression with nested operations:
spec:\n when: # auth enforced only on requests to POST /resources/* or PUT /resources/*\n\n - any:\n - all:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n - all:\n - selector: context.request.http.method\n operator: eq\n value: PUT\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n
v) more concise equivalent of the above (with implicit AND operator at the top level):
spec:\n when: # auth enforced only on requests to /resources/* path with method equals to POST or PUT\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/.*\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n
vi) to skip part of an AuthConfig (i.e., a specific auth rule):
spec:\n metadata:\n \"metadata-source\":\n http:\n url: https://my-metadata-source.io\n when: # only fetch the external metadata if the context is HTTP method other than OPTIONS\n\n - selector: context.request.http.method\n operator: neq\n value: OPTIONS\n
vii) skipping part of an AuthConfig will not affect other auth rules:
spec:\n authentication:\n \"authn-meth-1\":\n apiKey: {\u2026} # this auth rule only triggers for POST requests to /foo[/*]\n when:\n\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.path\n operator: matches\n value: ^/foo(/.*)?$\n\n \"authn-meth-2\": # this auth rule triggerred regardless\n jwt: {\u2026}\n
viii) concrete use-case: evaluating only the necessary identity checks based on the user's indication of the preferred authentication method (prefix of the value supplied in the HTTP Authorization
request header):
spec:\n authentication:\n \"jwt\":\n when:\n\n - selector: context.request.http.headers.authorization\n operator: matches\n value: JWT .+\n jwt: {\u2026}\n\n \"api-key\":\n when:\n\n - selector: context.request.http.headers.authorization\n operator: matches\n value: APIKEY .+\n apiKey: {\u2026}\n
ix) to avoid repetition while defining patterns for conditions:
spec:\n patterns:\n a-pet: # a named pattern that can be reused in sets of conditions\n\n - selector: context.request.http.path\n operator: matches\n value: ^/pets/\\d+(/.*)$\n\n metadata:\n \"pets-info\":\n when:\n\n - patternRef: a-pet\n http:\n url: https://pets-info.io?petId={context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}}\n\n authorization:\n \"pets-owners-only\":\n when:\n\n - patternRef: a-pet\n opa:\n rego: |\n allow { input.metadata[\"pets-info\"].ownerid == input.auth.identity.userid }\n
x) combining literals and refs \u2013 concrete case: authentication required for selected operations:
spec:\n patterns:\n api-base-path:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/api/.*\n\n authenticated-user:\n\n - selector: auth.identity.anonymous\n operator: neq\n value: \"true\"\n\n authentication:\n api-users: # tries to authenticate all requests to path /api/*\n when:\n\n - patternRef: api-base-path\n jwt: {\u2026}\n\n others: # defaults to anonymous access when authentication fails or not /api/* path\n anonymous: {}\n priority: 1\n\n authorization:\n api-write-access-requires-authentication: # POST/PUT/DELETE requests to /api/* path cannot be anonymous\n when:\n\n - all:\n - patternRef: api-base-path\n - any:\n - selector: context.request.http.method\n operator: eq\n value: POST\n - selector: context.request.http.method\n operator: eq\n value: PUT\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n patternMatching:\n patterns:\n - patternRef: authenticated-user\n\n response: # bonus: export user data if available\n success:\n dynamicMetadata:\n \"user-data\":\n when:\n\n - patternRef: authenticated-user\n json:\n properties:\n jwt-claims:\n selector: auth.identity\n
"},{"location":"authorino/docs/features/#common-feature-caching-cache","title":"Common feature: Caching (cache
)","text":"Objects resolved at runtime in an Auth Pipeline can be cached \"in-memory\", and avoided being evaluated again at a subsequent request, until it expires. A lookup cache key and a TTL can be set individually for any evaluator config in an AuthConfig.
Each cache config induces a completely independent cache table (or \"cache namespace\"). Consequently, different evaluator configs can use the same cache key and there will be no collision between entries from different evaluators.
E.g.:
spec:\n hosts:\n\n - my-api.io\n\n authentication: [\u2026]\n\n metadata:\n \"external-metadata\":\n http:\n url: http://my-external-source?search={context.request.http.path}\n cache:\n key:\n selector: context.request.http.path\n ttl: 300\n\n authorization:\n \"complex-policy\":\n opa:\n externalPolicy:\n url: http://my-policy-registry\n cache:\n key:\n selector: \"{auth.identity.group}-{context.request.http.method}-{context.request.http.path}\"\n ttl: 60\n
The example above sets caching for the 'external-metadata' metadata config and for the 'complex-policy' authorization policy. In the case of 'external-metadata', the cache key is the path of the original HTTP request being authorized by Authorino (fetched dynamically from the Authorization JSON); i.e., after obtaining a metadata object from the external source for a given contextual HTTP path the first time, whenever that same HTTP path repeats in a subsequent request, Authorino will use the cached object instead of sending a new request to the external source of metadata. After 5 minutes (300 seconds), the cache entry will expire and Authorino will fetch from the source again if requested.
As for the 'complex-policy' authorization policy, the cache key is a string composed of the 'group' the identity belongs to, the method of the HTTP request and the path of the HTTP request. Whenever these repeat, Authorino will use the previously evaluated and cached result of the policy. Cache entries in this namespace expire after 60 seconds.
Notes on evaluator caching
Capacity - By default, each cache namespace is limited to 1 MB. Entries will be evicted following a First-In-First-Out (FIFO) policy to release space. The individual capacity of cache namespaces is set at the level of the Authorino instance (via --evaluator-cache-size
command-line flag or spec.evaluatorCacheSize
field of the Authorino
CR).
Usage - Avoid caching objects whose evaluation is considered to be relatively cheap. Examples of operations associated with Authorino auth features that are usually NOT worth caching: validation of JSON Web Tokens (JWT), Kubernetes TokenReviews and SubjectAccessReviews, API key validation, simple JSON pattern-matching authorization rules, simple OPA policies. Examples of operations where caching may be desired: OAuth2 token introspection, fetching of metadata from external sources (via HTTP request), complex OPA policies.
"},{"location":"authorino/docs/features/#common-feature-metrics-metrics","title":"Common feature: Metrics (metrics
)","text":"By default, Authorino will only export metrics down to the level of the AuthConfig. Deeper metrics at the level of each evaluator within an AuthConfig can be activated by setting the common field metrics: true
of the evaluator config.
E.g.:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-authconfig\n namespace: my-ns\nspec:\n metadata:\n \"my-external-metadata\":\n http:\n url: http://my-external-source?search={context.request.http.path}\n metrics: true\n
The above will enable the metrics auth_server_evaluator_duration_seconds
(histogram) and auth_server_evaluator_total
(counter) with labels namespace=\"my-ns\"
, authconfig=\"my-authconfig\"
, evaluator_type=\"METADATA_GENERIC_HTTP\"
and evaluator_name=\"my-external-metadata\"
.
The same pattern works for other types of evaluators. Find below the list of all types and corresponding label constant used in the metric:
Evaluator type \u2013 metric's evaluator_type label:
- authentication.apiKey \u2013 IDENTITY_APIKEY
- authentication.kubernetesTokenReview \u2013 IDENTITY_KUBERNETES
- authentication.jwt \u2013 IDENTITY_OIDC
- authentication.oauth2Introspection \u2013 IDENTITY_OAUTH2
- authentication.x509 \u2013 IDENTITY_MTLS
- authentication.plain \u2013 IDENTITY_PLAIN
- authentication.anonymous \u2013 IDENTITY_NOOP
- metadata.http \u2013 METADATA_GENERIC_HTTP
- metadata.userInfo \u2013 METADATA_USERINFO
- metadata.uma \u2013 METADATA_UMA
- authorization.patternMatching \u2013 AUTHORIZATION_JSON
- authorization.opa \u2013 AUTHORIZATION_OPA
- authorization.kubernetesSubjectAccessReview \u2013 AUTHORIZATION_KUBERNETES
- authorization.spicedb \u2013 AUTHORIZATION_AUTHZED
- response.success.<headers|dynamicMetadata>.plain \u2013 RESPONSE_PLAIN
- response.success.<headers|dynamicMetadata>.json \u2013 RESPONSE_JSON
- response.success.<headers|dynamicMetadata>.wristband \u2013 RESPONSE_WRISTBAND
Metrics at the level of the evaluators can also be enforced for an entire Authorino instance, by setting the --deep-metrics-enabled
command-line flag. In this case, regardless of the value of the field spec.(authentication|metadata|authorization|response).metrics
in the AuthConfigs, individual metrics for all evaluators of all AuthConfigs will be exported.
For more information about metrics exported by Authorino, see Observability.
"},{"location":"authorino/docs/getting-started/","title":"Getting started","text":"This page covers requirements and instructions to deploy Authorino on a Kubernetes cluster, as well as the steps to declare, apply and try out a protection layer of authentication and authorization over your service, clean-up and complete uninstallation.
If you prefer learning with an example, check out our Hello World.
"},{"location":"authorino/docs/getting-started/#requirements","title":"Requirements","text":""},{"location":"authorino/docs/getting-started/#platform-requirements","title":"Platform requirements","text":"These are the platform requirements to use Authorino:
-
Kubernetes server (recommended v1.21 or later), with permission to create Kubernetes Custom Resource Definitions (CRDs) (for bootstrapping Authorino and Authorino Operator)
Alternative: K8s distros and platforms
As an alternative to upstream Kubernetes, you should be able to use any other Kubernetes distribution or Kubernetes Management Platform (KMP) with support for Kubernetes Custom Resource Definitions (CRDs) and custom controllers, such as Red Hat OpenShift, IBM Cloud Kubernetes Service (IKS), Google Kubernetes Engine (GKE), Amazon Elastic Kubernetes Service (EKS) and Azure Kubernetes Service (AKS).
-
Envoy proxy (recommended v1.19 or later), to wire up upstream services (i.e. the services to be protected with Authorino) and the external authorization filter (Authorino), for integrations based on the reverse-proxy architecture - example
Alternative: Non-reverse-proxy integration
Technically, any client that implements Envoy's external authorization gRPC protocol should be compatible with Authorino. Nevertheless, for integrations based on the reverse-proxy architecture, we strongly recommend that you leverage Envoy alongside Authorino.
"},{"location":"authorino/docs/getting-started/#feature-specific-requirements","title":"Feature-specific requirements","text":"A few examples are:
-
For OpenID Connect, make sure you have access to an identity provider (IdP) and an authority that can issue ID tokens (JWTs). Check out Keycloak which can solve both and connect to external identity sources and user federation like LDAP.
-
For Kubernetes authentication tokens, platform support for the TokenReview and SubjectAccessReview APIs of Kubernetes is required. In case you want to be able to request access tokens for clients running outside the cluster, you may also want to check out the requisites for using the Kubernetes TokenRequest API (GA in v1.20).
-
For User-Managed Access (UMA) resource data, you will need a UMA-compliant server running as well. This can be an implementation of the UMA protocol by each upstream API itself or (more typically) an external server that knows about the resources. Again, Keycloak can be a good fit here as well. Just keep in mind that, whatever resource server you choose, state-changing actions commanded in the upstream APIs or by other parties will have to be reflected in the resource server. Authorino will not do that for you.
Check out the Feature specification page for more feature-specific requirements.
"},{"location":"authorino/docs/getting-started/#installation","title":"Installation","text":""},{"location":"authorino/docs/getting-started/#step-install-the-authorino-operator","title":"Step: Install the Authorino Operator","text":"The simplest way to install the Authorino Operator is by applying the manifest bundle:
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
The above will install the latest build of the Authorino Operator and latest version of the manifests (CRDs and RBAC), which by default points as well to the latest build of Authorino, both based on the main
branches of each component. To install a stable released version of the Operator, which therefore also defaults to its latest compatible stable release of Authorino, replace main
with another tag of a proper release of the Operator, e.g. 'v0.2.0'.
This step will also install cert-manager in the cluster (required).
Alternatively, you can deploy the Authorino Operator using the Operator Lifecycle Manager bundles. For instructions, check out Installing via OLM.
"},{"location":"authorino/docs/getting-started/#step-request-an-authorino-instance","title":"Step: Request an Authorino instance","text":"Choose either cluster-wide or namespaced deployment mode and whether you want TLS termination enabled for the Authorino endpoints (gRPC authorization, raw HTTP authorization, and OIDC Festival Wristband Discovery listeners), and follow the corresponding instructions below.
The instructions here are for centralized gateway or centralized authorization service architecture. Check out the Topologies section of the docs for alternatively running Authorino in a sidecar container.
Cluster-wide (with TLS) Create the namespace:
kubectl create namespace authorino\n
Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secret
s in the namespace):
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n
Deploy Authorino:
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n replicas: 1\n clusterWide: true\n listener:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
Cluster-wide (without TLS) kubectl create namespace authorino\nkubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: true\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
Namespaced (with TLS) Create the namespace:
kubectl create namespace myapp\n
Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secret
s in the namespace):
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/myapp/g\" | kubectl -n myapp apply -f -\n
Deploy Authorino:
kubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: false\n listener:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
Namespaced (without TLS) kubectl create namespace myapp\nkubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n image: quay.io/kuadrant/authorino:latest\n replicas: 1\n clusterWide: false\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/getting-started/#protect-a-service","title":"Protect a service","text":"The most typical integration to protect services with Authorino is by putting the service (upstream) behind a reverse-proxy or API gateway, enabled with an authorization filter that ensures all requests to the service are first checked with the authorization server (Authorino).
To do that, make sure you have your upstream service deployed and running, usually in the same Kubernetes server where you installed Authorino. Then, set up an Envoy proxy and create an Authorino AuthConfig
for your service.
Authorino exposes 2 interfaces to serve the authorization requests:
- a gRPC interface that implements Envoy's External Authorization protocol;
- a raw HTTP authorization interface, suitable for using Authorino with Kubernetes ValidatingWebhook, for Envoy external authorization via HTTP, and other integrations (e.g. other proxies).
To use Authorino as a simple satellite (sidecar) Policy Decision Point (PDP), applications can integrate directly via any of these interfaces. By integrating via a proxy or API gateway, the combination makes Authorino perform as an external Policy Enforcement Point (PEP), completely decoupled from the application.
"},{"location":"authorino/docs/getting-started/#life-cycle","title":"Life cycle","text":""},{"location":"authorino/docs/getting-started/#step-setup-envoy","title":"Step: Setup Envoy","text":"To configure Envoy for proxying requests targeting the upstream service and authorizing with Authorino, setup an Envoy configuration that enables Envoy's external authorization HTTP filter. Store the configuration in a ConfigMap
.
These are the important bits in the Envoy configuration to activate Authorino:
static_resources:\n listeners:\n\n - address: {\u2026} # TCP socket address and port of the proxy\n filter_chains:\n - filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n route_config: {\u2026} # routing configs - virtual host domain and endpoint matching patterns and corresponding upstream services to redirect the traffic\n http_filters:\n - name: envoy.filters.http.ext_authz # the external authorization filter\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false # ensures only authenticated and authorized traffic goes through\n grpc_service:\n envoy_grpc:\n cluster_name: authorino\n timeout: 1s\n clusters:\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization # name of the Authorino service deployed \u2013 it can be the fully qualified name with `.<namespace>.svc.cluster.local` suffix (e.g. `authorino-authorino-authorization.myapp.svc.cluster.local`)\n port_value: 50051\n transport_socket: # in case TLS termination is enabled in Authorino; omit it otherwise\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n common_tls_context:\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/authorino-ca-cert.crt\n
For a complete Envoy ConfigMap
containing an upstream API protected with Authorino, with TLS enabled and option for rate limiting with Limitador, plus a webapp served with under the same domain of the protected API, check out this example.
After creating the ConfigMap
with the Envoy configuration, create an Envoy Deployment
and Service
. E.g.:
kubectl -n myapp apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: envoy\n labels:\n app: envoy\nspec:\n selector:\n matchLabels:\n app: envoy\n template:\n metadata:\n labels:\n app: envoy\n spec:\n containers:\n\n - name: envoy\n image: envoyproxy/envoy:v1.19-latest\n command: [\"/usr/local/bin/envoy\"]\n args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n ports:\n - name: web\n containerPort: 8000 # matches the address of the listener in the envoy config\n volumeMounts:\n - name: config\n mountPath: /usr/local/etc/envoy\n readOnly: true\n - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n subPath: ca.crt\n mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n readOnly: true\n volumes:\n - name: config\n configMap:\n name: envoy\n items:\n - key: envoy.yaml\n path: envoy.yaml\n - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n secret:\n defaultMode: 420\n secretName: authorino-ca-cert\n replicas: 1\nEOF\n
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Service\nmetadata:\n name: envoy\nspec:\n selector:\n app: envoy\n ports:\n\n - name: web\n port: 8000\n protocol: TCP\nEOF\n
"},{"location":"authorino/docs/getting-started/#step-apply-an-authconfig","title":"Step: Apply an AuthConfig
","text":"Check out the docs for a full description of Authorino's AuthConfig
Custom Resource Definition (CRD) and its features.
For examples based on specific use-cases, check out the User guides.
For authentication based on OpenID Connect (OIDC) JSON Web Tokens (JWT), plus one simple JWT claim authorization check, a typical AuthConfig
custom resource looks like the following:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: my-api-protection\nspec:\n hosts: # any hosts that resolve to the envoy service and envoy routing config where the external authorization filter is enabled\n\n - my-api.io # north-south traffic through a Kubernetes `Ingress` or OpenShift `Route`\n - my-api.myapp.svc.cluster.local # east-west traffic (between applications within the cluster)\n authentication:\n \"idp-users\":\n jwt:\n issuerUrl: https://my-idp.com/auth/realm\n authorization:\n \"check-claim\":\n patternMatching:\n patterns:\n - selector: auth.identity.group\n operator: eq\n value: allowed-users\nEOF\n
After applying the AuthConfig
, consumers of the protected service should be able to start sending requests.
"},{"location":"authorino/docs/getting-started/#clean-up","title":"Clean-up","text":""},{"location":"authorino/docs/getting-started/#remove-protection","title":"Remove protection","text":"Delete the AuthConfig
:
kubectl -n myapp delete authconfig/my-api-protection\n
Decommission the Authorino instance:
kubectl -n myapp delete authorino/authorino\n
"},{"location":"authorino/docs/getting-started/#uninstall","title":"Uninstall","text":"To completely remove Authorino CRDs, run from the Authorino Operator directory:
make uninstall\n
"},{"location":"authorino/docs/getting-started/#next-steps","title":"Next steps","text":" - Read the docs. The Architecture page and the Features page are good starting points to learn more about how Authorino works and its functionalities.
- Check out the User guides for several examples of
AuthConfig
s based on specific use-cases
"},{"location":"authorino/docs/terminology/","title":"Terminology","text":"Here we define some terms that are used in the project, with the goal of avoiding confusion and facilitating more accurate conversations related to Authorino
.
If you see terms used that are not here (or are used in place of terms here) please consider contributing a definition to this doc with a PR, or modifying the use elsewhere to align with these terms.
"},{"location":"authorino/docs/terminology/#terms","title":"Terms","text":"Access token Type of temporary password (security token), tied to an authenticated identity, issued by an auth server as of request from either the identity subject itself or a registered auth client known by the auth server, and that delegates to a party powers to operate on behalf of that identity before a resource server; it can be formatted as an opaque data string or as an encoded JSON Web Token (JWT).
Application Programming Interface (API) Interface that defines interactions between multiple software applications; (in HTTP communication) set of endpoints and specification to expose resources hosted by a resource server, to be consumed by client applications; the access facade of a resource server.
Attribute-based Access Control (ABAC) Authorization model that grants/denies access to resources based on evaluation of authorization policies which combine attributes together (from claims, from the request, from the resource, etc).
Auth Usually employed as a short for authentication and authorization together (AuthN/AuthZ).
Auth client Application client (software) that uses an auth server, either in the process of authenticating and/or authorizing identity subjects (including self) who want to consume resources from a resource server or auth server.
Auth server Server where auth clients, users, roles, scopes, resources, policies and permissions can be stored and managed.
Authentication (AuthN) Process of verifying that a given credential belongs to a claimed-to-be identity; usually resulting in the issuing of an access token.
Authorization (AuthZ) Process of granting (or denying) access over a resource to a party based on the set of authorization rules, policies and/or permissions enforced.
Authorization header HTTP request header frequently used to carry credentials to authenticate a user in an HTTP communication, like in requests sent to an API; alternatives usually include credentials carried in another (custom) HTTP header, query string parameter or HTTP cookie.
Capability Usually employed to refer to a management feature of a Kubernetes-native system, based on the definition and use of Kubernetes Custom Resources (CRDs and CRs), that enables that system to reach one of the following \u201ccapability levels\u201d: Basic Install, Seamless Upgrades, Full Lifecycle, Deep Insights, Auto Pilot.
Claim Attribute packed in a security token which represents a claim that one who bears the token is making about an entity, usually an identity subject.
Client ID Unique identifier of an auth client within an auth server domain (or auth server realm).
Client secret Password presented by auth clients together with their Client IDs while authenticating with an auth server, either when requesting access tokens to be issued or when consuming services from the auth servers in general.
Delegation Process of granting a party (usually an auth client) powers to act, often with limited scope, on behalf of an identity, to access resources from a resource server. See also OAuth2.
Hash-based Message Authentication Code (HMAC) Specific type of message authentication code (MAC) that involves a cryptographic hash function and a shared secret cryptographic key; it can be used to verify the authenticity of a message and therefore as an authentication method.
Identity Set of properties that qualifies a subject as a strongly identifiable entity (usually a user), who can be authenticated by an auth server. See also Claims.
Identity and Access Management (IAM) system Auth system that implements and/or connects with sources of identity (IdP) and offers interfaces for managing access (authorization policies and permissions). See also Auth server.
Identity Provider (IdP) Source of identity; it can be a feature of an auth server or external source connected to an auth server.
ID token Special type of access token; an encoded JSON Web Token (JWT) that packs claims about an identity.
JSON Web Token (JWT) JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties.
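For illustration, the decoded payload of a JWT carries claims as a JSON object (sample, non-normative values):
{\n \"iss\": \"https://my-idp.com/auth/realm\",\n \"sub\": \"831707be-f8c1-4b59-8e39-68a764c7b095\",\n \"exp\": 1723651234,\n \"group\": \"allowed-users\"\n}\n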
JSON Web Signature (JWS) Standard for signing arbitrary data, especially JSON Web Tokens (JWT).
JSON Web Key Set (JWKS) Set of keys containing the public keys used to verify any JSON Web Token (JWT).
Keycloak Open source auth server to allow single sign-on with identity and access management.
Lightweight Directory Access Protocol (LDAP) Open standard for distributed directory information services for sharing of information about users, systems, networks, services and applications.
Mutual Transport Layer Security (mTLS) Protocol for the mutual authentication of client-server communication, i.e., the client authenticates the server and the server authenticates the client, based on the acceptance of the X.509 certificates of each party.
OAuth 2.0 (OAuth2) Industry-standard protocol for delegation.
OpenID Connect (OIDC) Simple identity verification (authentication) layer built on top of the OAuth2 protocol.
Open Policy Agent (OPA) Authorization policy agent that enables the usage of declarative authorization policies written in Rego language.
Opaque token Security token devoid of explicit meaning (e.g. a random string); it requires the use of a lookup mechanism to be translated into a meaningful set of claims representing an identity.
Permission Association between a protected resource and the authorization policies that must be evaluated to determine whether access should be granted; e.g. <user|group|role> CAN DO <action> ON RESOURCE <X>.
Policy Rule or condition (authorization policy) that must be satisfied to grant access to a resource; strongly related to the different access control mechanisms (ACMs) and strategies one can use to protect resources, e.g. attribute-based access control (ABAC), role-based access control (RBAC), context-based access control, user-based access control (UBAC).
Policy Administration Point (PAP) Set of UIs and APIs to manage resources servers, resources, scopes, policies and permissions; it is where the auth system is configured.
Policy Decision Point (PDP) Where the authorization requests are sent, with permissions being requested, and authorization policies are evaluated accordingly.
Policy Enforcement Point (PEP) Where the authorization is effectively enforced, usually at the resource server or at a proxy, based on a response provided by the Policy Decision Point (PDP).
Policy storage Where policies are stored and from where they can be fetched, perhaps to be cached.
Red Hat SSO Auth server; downstream product created from the Keycloak Open Source project.
Refresh token Special type of security token, often provided together with an access token in an OAuth2 flow, used to renew the duration of an access token before it expires; it requires client authentication.
Request Party Token (RPT) JSON Web Token (JWT) digitally signed using JSON Web Signature (JWS), issued by the Keycloak auth server.
Resource One or more endpoints of a system, API or server, that can be protected.
Resource-level Access Control (RLAC) Authorization model that takes into consideration attributes of each specific request resource to grant/deny access to those resources (e.g. the resource's owner).
Resource server Server that hosts protected resources.
Role Aspect of a user\u2019s identity assigned to the user to indicate the level of access they should have to the system; essentially, roles represent collections of permissions.
Role-based Access Control (RBAC) Authorization model that grants/denies access to resources based on the roles of authenticated users (rather than on complex attributes/policy rules).
Scope Mechanism that defines the specific operations that applications can be allowed to do or information that they can request on an identity\u2019s behalf; often presented as a parameter when access is requested as a way to communicate what access is needed, and used by auth server to respond what actual access is granted.
Single Page Application (SPA) Web application or website that interacts with the user by dynamically rewriting the current web page with new data from the web server.
Single Sign-on (SSO) Authentication scheme that allows a user to log in with a single ID and password to any of several related, yet independent, software systems.
Upstream (In the context of authentication/authorization) API whose endpoints must be protected by the auth system; the unprotected service in front of which a protection layer is added (by connecting with a Policy Decision Point).
User-based Access Control (UBAC) Authorization model that grants/denies access to resources based on claims of the identity (attributes of the user).
User-Managed Access (UMA) OAuth2-based access management protocol, used for users of an auth server to control the authorization process, i.e. directly granting/denying access to user-owned resources to other requesting parties.
"},{"location":"authorino/docs/user-guides/","title":"User guides","text":" -
Hello World The basics of protecting an API with Authorino.
-
Authentication with Kubernetes tokens (TokenReview API) Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.
-
Authentication with API keys Issue API keys stored in Kubernetes Secret
s for clients to authenticate with your protected hosts.
-
Authentication with X.509 certificates and mTLS Verify client X.509 certificates against trusted root CAs.
-
OpenID Connect Discovery and authentication with JWTs Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).
-
OAuth 2.0 token introspection (RFC 7662) Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.
-
Passing credentials (Authorization
header, cookie headers and others) Customize where credentials are supplied in the request by each trusted source of identity.
-
HTTP \"Basic\" Authentication (RFC 7235) Turn Authorino API key Secret
s settings into HTTP basic auth.
-
Anonymous access Bypass identity verification or fall back to anonymous access when credentials fail to validate
-
Token normalization Normalize identity claims from trusted sources and reduce complexity in your policies.
-
Edge Authentication Architecture (EAA) Exchange satellite (outer-layer) authentication tokens for \"Festival Wristbands\" accepted ubiquitously at the inside of your network. Normalize from multiple and varied sources of identity and authentication methods in the edge of your architecture; filter privacy data, limit the scope of permissions, and simplify authorization rules to your internal microservices.
-
Fetching auth metadata from external sources Get online data from remote HTTP services to enhance authorization rules.
-
OpenID Connect UserInfo Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.
-
Resource-level authorization with User-Managed Access (UMA) resource registry Fetch resource attributes relevant for authorization from a User-Managed Access (UMA) resource registry such as Keycloak resource server clients.
-
Simple pattern-matching authorization policies Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.
-
OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.
-
Open Policy Agent (OPA) Rego policies Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.
-
Kubernetes RBAC for service authorization (SubjectAccessReview API) Manage permissions in the Kubernetes RBAC and let Authorino check them at request time with the authorization system of the cluster.
-
Authorization with Keycloak Authorization Services Use Authorino as an adapter for Keycloak Authorization Services without importing any library or rebuilding your application code.
-
Integration with Authzed/SpiceDB Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.
-
Injecting data in the request Inject HTTP headers with serialized JSON content.
-
Authenticated rate limiting (with Envoy Dynamic Metadata) Provide Envoy with dynamic metadata from the external authorization process to be injected and used by consecutive filters, such as by a rate limiting service.
-
Redirecting to a login page Customize response status code and headers on failed requests. E.g. redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized
; mask resources on access denied behind a 404 Not Found
response instead of 403 Forbidden
.
-
Mixing Envoy built-in filter for auth and Authorino Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in an AuthConfig.
-
Host override via context extension Induce the lookup of an AuthConfig by supplying extended host context, for use cases such as path prefix-based lookup and wildcard subdomain lookup.
-
Using Authorino as ValidatingWebhook service Use Authorino as a generic Kubernetes ValidatingWebhook service where the rules to validate a request to the Kubernetes API are written in an AuthConfig.
-
Reducing the operational space: sharding, noise and multi-tenancy Have multiple instances of Authorino running in the same space (Kubernetes namespace or cluster-scoped), yet watching particular sets of resources.
-
Caching Cache auth objects resolved at runtime for any configuration bit of an AuthConfig, for easy access in subsequent requests whenever an arbitrary cache key repeats, until the cache entry expires.
-
Observability Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.
"},{"location":"authorino/docs/user-guides/anonymous-access/","title":"User guide: Anonymous access","text":"Bypass identity verification or fall back to anonymous access when credentials fail to validate
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Anonymous access
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/anonymous-access/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
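For reference, a minimal sketch of a Kuadrant AuthPolicy with the spec.targetRef described above (resource names are placeholders; consult the Kuadrant documentation for the exact schema of your version):
kubectl apply -f -<<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: talker-api-protection\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: talker-api # placeholder: the route that sends traffic to your service\n rules:\n authentication:\n \"public\":\n anonymous: {}\nEOF\n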
"},{"location":"authorino/docs/user-guides/anonymous-access/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"public\":\n anonymous: {}\nEOF\n
The example above enables anonymous access (i.e. removes authentication), without adding any extra layer of protection to the API. This is virtually equivalent to setting a top-level condition on the AuthConfig
that always skips the configuration, or to switching authentication/authorization off completely in the route to the API.
For more sophisticated use cases of anonymous access with Authorino, consider combining this feature with other identity sources in the AuthConfig
while playing with the priorities of each source, as well as combining it with when
conditions, and/or adding authorization policies that either cover authentication or address anonymous access with proper rules (e.g. enforcing read-only access).
Check out the docs for the Anonymous access feature for an example of an AuthConfig
that falls back to anonymous access when a priority OIDC/JWT-based authentication fails, and enforces a read-only policy in such cases.
"},{"location":"authorino/docs/user-guides/anonymous-access/#consume-the-api","title":"\u277b Consume the API","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/anonymous-access/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/api-key-authentication/","title":"User guide: Authentication with API keys","text":"Issue API keys stored in Kubernetes Secret
s for clients to authenticate with your protected hosts.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 API key
In Authorino, API keys are stored as Kubernetes Secret
s. Each resource must contain an api_key
entry with the value of the API key, and labeled to match the selectors specified in spec.identity.apiKey.selector
of the AuthConfig
.
API key Secret
s must also include labels that match the secretLabelSelector
field of the Authorino instance. See Resource reconciliation and status update for details.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/api-key-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/api-key-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a valid API key:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
With missing or invalid API key:
curl -H 'Authorization: APIKEY invalid' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"friends\"\n# x-ext-auth-reason: the API Key provided is invalid\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#delete-an-api-key-revoke-access-to-the-api","title":"\u277d Delete an API key (revoke access to the API)","text":"kubectl delete secret/api-key-1\n
"},{"location":"authorino/docs/user-guides/api-key-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/","title":"User guide: Authenticated rate limiting (with Envoy Dynamic Metadata)","text":"Provide Envoy with dynamic metadata about the external authorization process to be injected into the rate limiting filter.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Response wrappers \u2192 Envoy Dynamic Metadata
- Dynamic response \u2192 JSON injection
- Identity verification & authentication \u2192 API key
Dynamic JSON objects built out of static values and values fetched from the Authorization JSON can be wrapped to be returned to the reverse-proxy as Envoy Well Known Dynamic Metadata content. Envoy can use those to inject data returned by the external authorization service into the other filters, such as the rate limiting filter.
Check out as well the user guides about Injecting data in the request and Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-limitador","title":"\u2778 Deploy Limitador","text":"Limitador is a lightweight rate limiting service that can be used with Envoy.
On this bundle, we will deploy Limitador pre-configured to limit requests to the talker-api
domain up to 5 requests per interval of 60 seconds per user_id
. Envoy will be configured to recognize the presence of Limitador and activate it on requests to the Talker API.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-the-talker-api","title":"\u2779 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#setup-envoy","title":"\u277a Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-an-authconfig","title":"\u277b Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
An annotation auth-data/username
will be read from the Kubernetes API Key secret and passed as dynamic metadata { \"ext_auth_data\": { \"username\": \u00abannotations.auth-data/username\u00bb } }
.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n dynamicMetadata:\n \"rate-limit\":\n json:\n properties:\n \"username\":\n selector: auth.identity.metadata.annotations.auth-data\\/username\n key: ext_auth_data # how this bit of dynamic metadata from the ext authz service is named in the Envoy config\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-the-api-keys","title":"\u277c Create the API keys","text":"For user John:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/username: john\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
For user Jane:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/username: jane\nstringData:\n api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#consume-the-api","title":"\u277d Consume the API","text":"As John:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Repeat the request a few more times within the 60-second time window, until the response status is 429 Too Many Requests
.
While the API is still limited to John, send requests as Jane:
curl -H 'Authorization: APIKEY 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/authzed/","title":"User guide: Integration with Authzed/SpiceDB","text":"Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.
Authorino capabilities featured in this guide: - Authorization \u2192 SpiceDB
- Identity verification & authentication \u2192 API key
"},{"location":"authorino/docs/user-guides/authzed/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/authzed/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/authzed/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/authzed/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/authzed/#create-the-permission-database","title":"\u277a Create the permission database","text":"Create the namespace:
kubectl create namespace spicedb\n
Create the SpiceDB instance:
kubectl -n spicedb apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: spicedb\n labels:\n app: spicedb\nspec:\n selector:\n matchLabels:\n app: spicedb\n template:\n metadata:\n labels:\n app: spicedb\n spec:\n containers:\n\n - name: spicedb\n image: authzed/spicedb\n args:\n - serve\n - \"--grpc-preshared-key\"\n - secret\n - \"--http-enabled\"\n ports:\n - containerPort: 50051\n - containerPort: 8443\n replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: spicedb\nspec:\n selector:\n app: spicedb\n ports:\n - name: grpc\n port: 50051\n protocol: TCP\n - name: http\n port: 8443\n protocol: TCP\nEOF\n
Forward local request to the SpiceDB service inside the cluster:
kubectl -n spicedb port-forward service/spicedb 8443:8443 2>&1 >/dev/null &\n
Create the permission schema:
curl -X POST http://localhost:8443/v1/schema/write \\\n -H 'Authorization: Bearer secret' \\\n -H 'Content-Type: application/json' \\\n -d @- << EOF\n{\n \"schema\": \"definition blog/user {}\\ndefinition blog/post {\\n\\trelation reader: blog/user\\n\\trelation writer: blog/user\\n\\n\\tpermission read = reader + writer\\n\\tpermission write = writer\\n}\"\n}\nEOF\n
Create the relationships:
blog/user:emilia
\u2192 writer
of blog/post:1
blog/user:beatrice
\u2192 reader
of blog/post:1
curl -X POST http://localhost:8443/v1/relationships/write \\\n -H 'Authorization: Bearer secret' \\\n -H 'Content-Type: application/json' \\\n -d @- << EOF\n{\n \"updates\": [\n {\n \"operation\": \"OPERATION_CREATE\",\n \"relationship\": {\n \"resource\": {\n \"objectType\": \"blog/post\",\n \"objectId\": \"1\"\n },\n \"relation\": \"writer\",\n \"subject\": {\n \"object\": {\n \"objectType\": \"blog/user\",\n \"objectId\": \"emilia\"\n }\n }\n }\n },\n {\n \"operation\": \"OPERATION_CREATE\",\n \"relationship\": {\n \"resource\": {\n \"objectType\": \"blog/post\",\n \"objectId\": \"1\"\n },\n \"relation\": \"reader\",\n \"subject\": {\n \"object\": {\n \"objectType\": \"blog/user\",\n \"objectId\": \"beatrice\"\n }\n }\n }\n }\n ]\n}\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Store the shared token for Authorino to authenticate with the SpiceDB instance in a Service:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: spicedb\n labels:\n app: spicedb\nstringData:\n grpc-preshared-key: secret\nEOF\n
Create the AuthConfig:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"blog-users\":\n apiKey:\n selector:\n matchLabels:\n app: talker-api\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"authzed-spicedb\":\n spicedb:\n endpoint: spicedb.spicedb.svc.cluster.local:50051\n insecure: true\n sharedSecretRef:\n name: spicedb\n key: grpc-preshared-key\n subject:\n kind:\n value: blog/user\n name:\n selector: auth.identity.metadata.annotations.username\n resource:\n kind:\n value: blog/post\n name:\n selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}\n permission:\n selector: context.request.http.method.@replace:{\"old\":\"GET\",\"new\":\"read\"}.@replace:{\"old\":\"POST\",\"new\":\"write\"}\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#create-the-api-keys","title":"\u277c Create the API keys","text":"For Emilia (writer):
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-writer\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: talker-api\n annotations:\n username: emilia\nstringData:\n api_key: IAMEMILIA\nEOF\n
For Beatrice (reader):
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-reader\n labels:\n authorino.kuadrant.io/managed-by: authorino\n app: talker-api\n annotations:\n username: beatrice\nstringData:\n api_key: IAMBEATRICE\nEOF\n
"},{"location":"authorino/docs/user-guides/authzed/#consume-the-api","title":"\u277d Consume the API","text":"As Emilia, send a GET request:
curl -H 'Authorization: APIKEY IAMEMILIA' \\\n -X GET \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Emilia, send a POST request:
curl -H 'Authorization: APIKEY IAMEMILIA' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Beatrice, send a GET request:
curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n -X GET \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n
As Beatrice, send a POST request:
curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: PERMISSIONSHIP_NO_PERMISSION;token=GhUKEzE2NzU3MDE3MjAwMDAwMDAwMDA=\n
"},{"location":"authorino/docs/user-guides/authzed/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-writer\nkubectl delete secret/api-key-reader\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace spicedb\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/caching/","title":"User guide: Caching","text":"Cache auth objects resolved at runtime for any configuration bit of an AuthConfig (i.e. any evaluator), of any phase (identity, metadata, authorization and dynamic response), for easy access in subsequent requests, whenever an arbitrary (user-defined) cache key repeats, until the cache entry expires.
This is particularly useful for configuration bits whose evaluation is significantly more expensive than accessing the cache. E.g.:
- Caching of metadata fetched from external sources in general
- Caching of previously validated identity access tokens (e.g. for OAuth2 opaque tokens that involve consuming the token introspection endpoint of an external auth server)
- Caching of complex Rego policies that involve sending requests to external services
Cases where one will NOT want to enable caching, due to relatively cheap compared to accessing and managing the cache:
- Validation of OIDC/JWT access tokens
- OPA/Rego policies that do not involve external requests
- JSON pattern-matching authorization
- Dynamic JSON responses
- Anonymous access
Authorino capabilities featured in this guide: - Common feature \u2192 Caching
- Identity verification & authentication \u2192 Anonymous access
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Dynamic response \u2192 JSON injection
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/caching/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/caching/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/caching/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/caching/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/caching/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/caching/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The example below enables caching for the external source of metadata, which in this case, for convenience, is the same upstream API protected by Authorino (i.e. the Talker API), though consumed directly by Authorino, without passing through the proxy. This API generates a uuid
random hash that it injects in the JSON response. This value is different in every request processed by the API.
The example also enables caching of returned OPA virtual documents. cached-authz
is a trivial Rego policy that always grants access, but generates a timestamp, which Authorino will cache.
In both cases, the path of the HTTP request is used as cache key. I.e., whenever the path repeats, Authorino reuse the values stored previously in each cache table (cached-metadata
and cached-authz
), respectively saving a request to the external source of metadata and the evaluation of the OPA policy. Cache entries will expire in both cases after 60 seconds they were stored in the cache.
The cached values will be visible in the response returned by the Talker API in x-authz-data
header injected by Authorino. This way, we can tell when an existing value in the cache was used and when a new one was generated and stored.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"anonymous\":\n anonymous: {}\n metadata:\n \"cached-metadata\":\n http:\n url: \"http://talker-api.default.svc.cluster.local:3000/metadata/{context.request.http.path}\"\n cache:\n key:\n selector: context.request.http.path\n ttl: 60\n authorization:\n \"cached-authz\":\n opa:\n rego: |\n now = time.now_ns()\n allow = true\n allValues: true\n cache:\n key:\n selector: context.request.http.path\n ttl: 60\n response:\n success:\n headers:\n \"x-authz-data\":\n json:\n properties:\n \"cached-metadata\":\n selector: auth.metadata.cached-metadata.uuid\n \"cached-authz\":\n selector: auth.authorization.cached-authz.now\nEOF\n
"},{"location":"authorino/docs/user-guides/caching/#consume-the-api","title":"\u277b Consume the API","text":" - To
/hello
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",\n# [\u2026]\n
- To a different path
curl http://talker-api.127.0.0.1.nip.io:8000/goodbye\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343097860450300\\\",\\\"cached-metadata\\\":\\\"37fce386-1ee8-40a7-aed1-bf8a208f283c\\\"}\",\n# [\u2026]\n
- To
/hello
again before the cache entry expires (60 seconds from the first request sent to this path)
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\", <=== same cache-id as before\n# [\u2026]\n
- To
/hello
again after the cache entry expires (60 seconds from the first request sent to this path)
curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n# \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343135702743800\\\",\\\"cached-metadata\\\":\\\"e708a3a6-5caf-4028-ab5c-573ad9be7188\\\"}\", <=== different cache-id\n# [\u2026]\n
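Alternatively, to watch the expiry without juggling timestamps, you can poll the endpoint and filter the echoed header (a quick sketch; it assumes the Talker API echoes request headers in its JSON response body, as in the sample outputs above — the first two iterations should print the same values, the third new ones):
for i in 1 2 3; do\n curl -s http://talker-api.127.0.0.1.nip.io:8000/hello | grep 'X-Authz-Data'\n sleep 30\ndone\n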
"},{"location":"authorino/docs/user-guides/caching/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/","title":"User guide: Redirecting to a login page","text":"Customize response status code and headers on failed requests to redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized
.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Custom denial status
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
Authorino's default response status codes, messages and headers for unauthenticated (401
) and unauthorized (403
) requests can be customized with static values and values fetched from the Authorization JSON.
Check out as well the user guides about HTTP \"Basic\" Authentication (RFC 7235) and OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample web application called Matrix Quotes to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-the-matrix-quotes-web-application","title":"\u2778 Deploy the Matrix Quotes web application","text":"The Matrix Quotes is a static web application that contains quotes from the film The Matrix.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Matrix Quotes webapp behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/envoy-deploy.yaml\n
The command above creates an Ingress
with host name matrix-quotes.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: matrix-quotes-protection\nspec:\n hosts:\n\n - matrix-quotes.127.0.0.1.nip.io\n authentication:\n \"browser-users\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n cookie:\n name: TOKEN\n \"http-basic-auth\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n authorizationHeader:\n prefix: Basic\n response:\n unauthenticated:\n code: 302\n headers:\n \"Location\":\n selector: \"http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to={request.path}\"\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: user-credential-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: am9objpw # john:p\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application","title":"\u277c Consume the application","text":"On a web browser, navigate to http://matrix-quotes.127.0.0.1.nip.io:8000.
Click on the cards to read quotes from characters of the movie. You should be redirected to the login page.
Log in using John's credentials:
- Username: john
- Password: p
Click again on the cards and check that now you are able to access the inner pages.
You can also consume a protected endpoint of the application using HTTP Basic Authentication:
curl -u john:p http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html\n# HTTP/1.1 200 OK\n
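Without any credentials, the request is denied with the custom redirect instead of a plain 401 Unauthorized (expect something like the below; header casing may vary depending on the proxy):
curl http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html -i\n# HTTP/1.1 302 Found\n# location: http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to=/neo.html\n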
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#optional-modify-the-authconfig-to-authenticate-with-oidc","title":"\u277d (Optional) Modify the AuthConfig
to authenticate with OIDC","text":""},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-a-keycloak-server","title":"Setup a Keycloak server","text":"Deploy a Keycloak server preloaded with a realm named kuadrant
:
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Resolve the local Keycloak domain so it can be accessed with the same host name both from the local host and from inside the cluster (needed so users can be redirected to Keycloak's login page while the issued tokens remain valid inside the cluster):
echo '127.0.0.1 keycloak' >> /etc/hosts\n
Forward local requests to the instance of Keycloak running in the cluster:
kubectl port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n
Create a client:
curl -H \"Authorization: Bearer $(curl http://keycloak:8080/realms/master/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=admin-cli' -d 'username=admin' -d 'password=p' | jq -r .access_token)\" \\\n -H 'Content-type: application/json' \\\n -d '{ \"name\": \"matrix-quotes\", \"clientId\": \"matrix-quotes\", \"publicClient\": true, \"redirectUris\": [\"http://matrix-quotes.127.0.0.1.nip.io:8000/auth*\"], \"enabled\": true }' \\\n http://keycloak:8080/admin/realms/kuadrant/clients\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#reconfigure-the-matrix-quotes-app-to-use-keycloaks-login-page","title":"Reconfigure the Matrix Quotes app to use Keycloak's login page","text":"kubectl set env deployment/matrix-quotes KEYCLOAK_REALM=http://keycloak:8080/realms/kuadrant CLIENT_ID=matrix-quotes\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#apply-the-changes-to-the-authconfig","title":"Apply the changes to the AuthConfig
","text":"kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: matrix-quotes-protection\nspec:\n hosts:\n\n - matrix-quotes.127.0.0.1.nip.io\n authentication:\n \"idp-users\":\n jwt:\n issuerUrl: http://keycloak:8080/realms/kuadrant\n credentials:\n cookie:\n name: TOKEN\n response:\n unauthenticated:\n code: 302\n headers:\n \"Location\":\n selector: \"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/auth?client_id=matrix-quotes&redirect_uri=http://matrix-quotes.127.0.0.1.nip.io:8000/auth?redirect_to={request.path}&scope=openid&response_type=code\"\nEOF\n
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application-again","title":"Consume the application again","text":"Refresh the browser window or navigate again to http://matrix-quotes.127.0.0.1.nip.io:8000.
Click on the cards to read quotes from characters of the movie. You should be redirected to the login page, this time served by the Keycloak server.
Log in as Jane (a user of the Keycloak realm):
- Username: jane
- Password: p
Click again on the cards and check that now you are able to access the inner pages.
"},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/user-credential-1\nkubectl delete authconfig/matrix-quotes-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/","title":"User guide: Edge Authentication Architecture (EAA)","text":"Edge Authentication Architecture (EAA) is a pattern where more than extracting authentication logics and specifics from the application codebase to a proper authN/authZ layer, this is pushed to the edge of your cloud network, without violating the Zero Trust principle nevertheless.
The very definition of \"edge\" is subject to discussion, but the underlying idea is that clients (e.g. API clients, IoT devices, etc.) authenticate with a layer that, before moving traffic into the network:
- understands the complexity of all the different methods of authentication supported;
- sometimes performs token normalization;
- optionally enforces some preliminary authorization policies; and
- possibly filters out data that is sensitive to privacy concerns (e.g. to comply with local legislation such as GDPR, CCPA, etc.)
At a minimum, EAA simplifies authentication between applications and microservices inside the network, and reduces authorization to domain-specific rules and policies, rather than having to deal with all the complexity of supporting every type of client in every node.
Authorino capabilities featured in this guide: - Dynamic response \u2192 Festival Wristband tokens
- Identity verification & authentication \u2192 Identity extension
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
Festival Wristbands are OpenID Connect ID tokens (signed JWTs) issued by Authorino at the end of the Auth Pipeline for authorized requests. They can be configured to include claims based on static values and values fetched from the Authorization JSON.
Check out as well the user guides about Token normalization, Authentication with API keys and OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
- jwt, to inspect JWTs (optional)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino and configuring 2 environments of an architecture, edge
and internal
.
The first environment is a facade that handles the first layer of authentication and exchanges any valid authentication token presented for a Festival Wristband token. In the second, we will deploy a sample service called Talker API, which the authorization service will ensure receives only authenticated traffic presenting a valid Festival Wristband.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u2779.
At steps \u2779 and \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-namespaces","title":"\u2777 Create the namespaces","text":"For simplicity, this examples will set up edge and internal nodes in different namespaces of the same Kubernetes cluster. Those will share a same single cluster-wide Authorino instance. In real-life scenarios, it does not have to be like that.
kubectl create namespace authorino\nkubectl create namespace edge\nkubectl create namespace internal\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-authorino","title":"\u2778 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources cluster-wide2, with TLS disabled3.
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-edge","title":"\u2779 Setup the Edge","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy","title":"Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up external authorization with the Authorino instance.4
kubectl -n edge apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-edge-deploy.yaml\n
The command above creates an Ingress
with host name edge.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 9000 to the Envoy service running inside the cluster:
kubectl -n edge port-forward deployment/envoy 9000:9000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig","title":"Create the AuthConfig
","text":"Create a required secret that will be used by Authorino to sign the Festival Wristband tokens:
kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: wristband-signing-key\nstringData:\n key.pem: |\n -----BEGIN EC PRIVATE KEY-----\n MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n -----END EC PRIVATE KEY-----\ntype: Opaque\nEOF\n
Create the config:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n edge apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: edge-auth\nspec:\n hosts:\n\n - edge.127.0.0.1.nip.io\n authentication:\n \"api-clients\":\n apiKey:\n selector:\n matchLabels:\n authorino.kuadrant.io/managed-by: authorino\n allNamespaces: true\n credentials:\n authorizationHeader:\n prefix: APIKEY\n overrides:\n \"username\":\n selector: auth.identity.metadata.annotations.authorino\\.kuadrant\\.io/username\n \"idp-users\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n defaults:\n \"username\":\n selector: auth.identity.preferred_username\n response:\n success:\n dynamicMetadata:\n \"wristband\":\n wristband:\n issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\n customClaims:\n \"username\":\n selector: auth.identity.username\n tokenDuration: 300\n signingKeyRefs:\n - name: wristband-signing-key\n algorithm: ES256\nEOF\n
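(Optional) The wristband issuer implements OpenID Connect Discovery, so you can inspect its configuration (including the JWKS endpoint used to verify wristbands) from inside the cluster, using the issuer URL declared above:
kubectl run oidc-config --attach --rm --restart=Never -q --image=curlimages/curl -- http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband/.well-known/openid-configuration -s | jq\n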
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-internal-workload","title":"\u277a Setup the internal workload","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-the-talker-api","title":"Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy_1","title":"Setup Envoy","text":"This other bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.
kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-node-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl -n internal port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig_1","title":"Create the AuthConfig
","text":"Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n internal apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"edge-authenticated\":\n jwt:\n issuerUrl: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n annotations:\n authorino.kuadrant.io/username: alice\n authorino.kuadrant.io/email: alice@host\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#using-the-api-key-to-authenticate","title":"Using the API key to authenticate","text":"Authenticate at the edge:
WRISTBAND_TOKEN=$(curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n
Consume the API:
curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Try to consume the API with an authentication token that is only accepted at the edge:
curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"edge-authenticated\"\n# x-ext-auth-reason: credential not found\n
(Optional) Inspect the wristband token and verify that it contains only the restricted info needed to authenticate and authorize with internal apps.
jwt decode $WRISTBAND_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# {\n# \"exp\": 1638452051,\n# \"iat\": 1638451751,\n# \"iss\": \"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\",\n# \"sub\": \"02cb51ea0e1c9f3c0960197a2518c8eb4f47e1b9222a968ffc8d4c8e783e4d19\",\n# \"username\": \"alice\"\n# }\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#authenticating-with-the-keycloak-server","title":"Authenticating with the Keycloak server","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl -n edge run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is likewise reachable from within the cluster.
(Optional) Inspect the access token issued by Keycloak and verify how it contains more details about the identity than required to authenticate and authorize with internal apps.
jwt decode $ACCESS_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# { [...]\n# \"email\": \"jane@kuadrant.io\",\n# \"email_verified\": true,\n# \"exp\": 1638452220,\n# \"family_name\": \"Smith\",\n# \"given_name\": \"Jane\",\n# \"iat\": 1638451920,\n# \"iss\": \"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\",\n# \"jti\": \"699f6e49-dea4-4f29-ae2a-929a3a18c94b\",\n# \"name\": \"Jane Smith\",\n# \"preferred_username\": \"jane\",\n# \"realm_access\": {\n# \"roles\": [\n# \"offline_access\",\n# \"member\",\n# \"admin\",\n# \"uma_authorization\"\n# ]\n# },\n# [...]\n
As Jane, obtain a limited wristband token at the edge:
WRISTBAND_TOKEN=$(curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n
Consume the API:
curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete namespace edge\nkubectl delete namespace internal\nkubectl delete namespace authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino and Authorino Operator manifests, run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
cluster-wide
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/","title":"User guide: Mixing Envoy built-in filter for auth and Authorino","text":"Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in a AuthConfig.
In this user guide, we will set up Envoy and Authorino to protect a service called the Talker API, with JWT authentication handled in Envoy and a more complex authorization policy enforced in Authorino.
The policy defines a geo-fence by which only requests originating in Great Britain (country code: GB) will be accepted, unless the user is bound to a role called 'admin' in the auth server, in which case no geofence is enforced.
All requests to the Talker API will be authenticated in Envoy. However, requests to /global
will not trigger the external authorization.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Plain
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Pattern-matching authorization
- Dynamic response \u2192 Custom denial status
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: authorino\n name: envoy\ndata:\n envoy.yaml: |\n static_resources:\n clusters:\n\n - name: talker-api\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: talker-api\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: talker-api\n port_value: 3000\n - name: keycloak\n connect_timeout: 0.25s\n type: logical_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: keycloak\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: keycloak.keycloak.svc.cluster.local\n port_value: 8080\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization\n port_value: 50051\n listeners:\n - address:\n socket_address:\n address: 0.0.0.0\n port_value: 8000\n filter_chains:\n - filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n stat_prefix: local\n route_config:\n name: local_route\n virtual_hosts:\n - name: local_service\n domains: ['*']\n routes:\n - match: { path_separated_prefix: /global }\n route: { cluster: talker-api }\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n disabled: true\n - match: { prefix: / }\n route: { cluster: talker-api }\n http_filters:\n - name: envoy.filters.http.jwt_authn\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\n providers:\n keycloak:\n issuer: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n remote_jwks:\n http_uri:\n uri: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/certs\n cluster: keycloak\n timeout: 5s\n cache_duration:\n seconds: 300\n payload_in_metadata: verified_jwt\n rules:\n - match: { prefix: / }\n requires: { provider_name: keycloak }\n - name: envoy.filters.http.ext_authz\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false\n metadata_context_namespaces:\n - envoy.filters.http.jwt_authn\n grpc_service:\n envoy_grpc:\n cluster_name: authorino\n timeout: 1s\n - name: envoy.filters.http.router\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n use_remote_address: true\n admin:\n access_log_path: \"/tmp/admin_access.log\"\n address:\n socket_address:\n address: 0.0.0.0\n port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: authorino\n svc: envoy\n name: envoy\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: authorino\n svc: envoy\n template:\n metadata:\n labels:\n app: authorino\n svc: envoy\n spec:\n containers:\n - args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n command:\n - /usr/local/bin/envoy\n image: envoyproxy/envoy:v1.22-latest\n name: envoy\n ports:\n - containerPort: 8000\n name: web\n - containerPort: 8001\n name: admin\n volumeMounts:\n - mountPath: 
/usr/local/etc/envoy\n name: config\n readOnly: true\n volumes:\n - configMap:\n items:\n - key: envoy.yaml\n path: envoy.yaml\n name: envoy\n name: config\n---\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: authorino\n name: envoy\nspec:\n ports:\n - name: web\n port: 8000\n protocol: TCP\n selector:\n app: authorino\n svc: envoy\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n name: ingress-wildcard-host\nspec:\n rules:\n - host: talker-api.127.0.0.1.nip.io\n http:\n paths:\n - backend:\n service:\n name: envoy\n port:\n number: 8000\n path: /\n pathType: Prefix\nEOF\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
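Note that the jwt_authn rules in the configuration above match every path, so Envoy itself rejects requests without a valid JWT, even on /global, where only the external authorization is skipped:
curl http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 401 Unauthorized\n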
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-ip-location-service","title":"\u277a Deploy the IP Location service","text":"The IP Location service is a simple service that resolves an IPv4 address into geo location info.
kubectl apply -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/ip-location/ip-location-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#create-an-authconfig","title":"\u277b Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"jwt\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n metadata:\n \"geoinfo\":\n http:\n url: 'http://ip-location.default.svc.cluster.local:3000/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}'\n headers:\n \"Accept\":\n value: application/json\n cache:\n key:\n selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\":\\\",\\\"}\"\n authorization:\n \"geofence\":\n when:\n - selector: auth.identity.realm_access.roles\n operator: excl\n value: admin\n patternMatching:\n patterns:\n - selector: auth.metadata.geoinfo.country_iso_code\n operator: eq\n value: \"GB\"\n response:\n unauthorized:\n message:\n selector: \"The requested resource is not available in {auth.metadata.geoinfo.country_name}\"\nEOF\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-a-token-and-consume-the-api","title":"\u277c Obtain a token and consume the API","text":""},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user John, a non-admin (member) user:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is likewise reachable from within the cluster.
As John, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As John, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: The requested resource is not available in Italy\n
As John, consume a path of the API that will cause Envoy to skip external authorization:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"Obtain an access token with the Keycloak server for Jane, an admin user:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As Jane, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n
As Jane, consume a path of the API that will cause Envoy to skip external authorization:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 109.69.200.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete ingress/ingress-wildcard-host\nkubectl delete service/envoy\nkubectl delete deployment/envoy\nkubectl delete configmap/envoy\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/external-metadata/","title":"User guide: Fetching auth metadata from external sources","text":"Get online data from remote HTTP services to enhance authorization rules.
Authorino capabilities featured in this guide: - External auth metadata \u2192 HTTP GET/GET-by-POST
- Identity verification & authentication \u2192 API key
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
You can configure Authorino to fetch additional metadata from external sources at request time, by sending either a GET or a POST request to an HTTP service. The service is expected to return JSON content, which is appended to the Authorization JSON, thus becoming available for use in other configs of the Auth Pipeline, such as authorization policies or custom responses.
The URL, parameters and headers of the request to the external metadata source can be configured, including with dynamic values. Authentication between Authorino and the service can be set as part of these configuration options, or based on a shared authentication token stored in a Kubernetes Secret
.
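As a sketch of the latter option (hypothetical Secret name and URL; check the AuthConfig CRD specification for the exact schema), a metadata config along these lines would have Authorino inject the shared token from the Secret into the callout request:
metadata:\n \"external-info\":\n http:\n url: \"https://metadata.example.io/info\"\n sharedSecretRef: # hypothetical Secret holding the shared token\n name: metadata-service-shared-auth\n key: shared-secret\n credentials:\n authorizationHeader:\n prefix: Bearer\n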
Check out as well the user guides about Authentication with API keys and Open Policy Agent (OPA) Rego policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/external-metadata/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/external-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/external-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/external-metadata/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/external-metadata/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/external-metadata/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, we will implement a geofence policy for the API, using OPA and metadata fetching from an external service that returns geolocation JSON data for a given IP address. The policy establishes that only GET
requests are allowed and the path of the request should be in the form /{country-code}/*
, where {country-code}
is the 2-character code of the country where the client is identified as being physically present.
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n metadata:\n \"geo\":\n http:\n url: 'http://ip-api.com/json/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}?fields=countryCode'\n headers:\n \"Accept\":\n value: application/json\n authorization:\n \"geofence\":\n opa:\n rego: |\n import input.context.request.http\n\n allow {\n http.method = \"GET\"\n split(http.path, \"/\") = [_, requested_country, _]\n lower(requested_country) == lower(object.get(input.auth.metadata.geo, \"countryCode\", \"\"))\n }\nEOF\n
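(Optional) You can exercise the same Rego logic locally with the OPA CLI before applying the AuthConfig (a sketch; the input document mimics the fragments of the Authorization JSON the policy reads, and with OPA >= 1.0 you may need the --v0-compatible flag to accept this pre-Rego-v1 syntax):
cat > /tmp/geofence.rego <<'EOF'\npackage geofence\n\nallow {\n input.context.request.http.method = \"GET\"\n split(input.context.request.http.path, \"/\") = [_, requested_country, _]\n lower(requested_country) == lower(object.get(input.auth.metadata.geo, \"countryCode\", \"\"))\n}\nEOF\necho '{\"context\":{\"request\":{\"http\":{\"method\":\"GET\",\"path\":\"/gb/hello\"}}},\"auth\":{\"metadata\":{\"geo\":{\"countryCode\":\"GB\"}}}}' | opa eval -I -d /tmp/geofence.rego 'data.geofence.allow'\n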
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON, including the description of the @extract
string modifier.
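For illustration, with the default position 0, the modifier used above resolves to the first (client) address of a multi-hop header:
# X-Forwarded-For: 109.112.34.56, 10.0.0.1\n# context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"} => \"109.112.34.56\"\n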
"},{"location":"authorino/docs/user-guides/external-metadata/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/external-metadata/#consume-the-api","title":"\u277c Consume the API","text":"From an IP address assigned to the United Kingdom of Great Britain and Northern Ireland (country code GB):
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 79.123.45.67' \\\n http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 403 Forbidden\n
From an IP address assigned to Italy (country code IT):
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 109.112.34.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 403 Forbidden\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 109.112.34.56' \\\n http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/external-metadata/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/hello-world/","title":"User guide: Hello World","text":""},{"location":"authorino/docs/user-guides/hello-world/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant, you can skip step \u2778. You may already have Authorino installed and running as well. In this case, skip step \u277a too. If you also have your workload cluster configured, with a sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, go straight to step \u277c.
At step \u277c, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
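For illustration, a minimal sketch of a Kuadrant AuthPolicy follows, targeting a hypothetical HTTPRoute named talker-api; note spec.targetRef in place of spec.host. Field values here are assumptions for the example, not resources created in this guide:
kubectl apply -f -<<EOF\napiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: talker-api-protection\nspec:\n targetRef: # host names are inferred from this network object\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: talker-api # hypothetical route\n rules:\n authentication:\n \"api-clients\":\n apiKey:\n selector:\n matchLabels:\n group: friends\nEOF\n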
"},{"location":"authorino/docs/user-guides/hello-world/#create-the-namespace","title":"\u2776 Create the namespace","text":"kubectl create namespace hello-world\n# namespace/hello-world created\n
"},{"location":"authorino/docs/user-guides/hello-world/#deploy-the-talker-api","title":"\u2777 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n# deployment.apps/talker-api created\n# service/talker-api created\n
"},{"location":"authorino/docs/user-guides/hello-world/#setup-envoy","title":"\u2778 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.1
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/envoy-deploy.yaml\n# configmap/envoy created\n# deployment.apps/envoy created\n# service/envoy created\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl -n hello-world port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-unprotected","title":"\u2779 Consume the API (unprotected)","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/hello-world/#protect-the-api","title":"\u277a Protect the API","text":""},{"location":"authorino/docs/user-guides/hello-world/#install-the-authorino-operator","title":"Install the Authorino Operator","text":"curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/hello-world/#deploy-authorino","title":"Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service2 that watches for AuthConfig
resources in the hello-world
namespace3, with TLS disabled4.
kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authorino.yaml\n# authorino.operator.authorino.kuadrant.io/authorino created\n
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-behind-envoy-and-authorino","title":"\u277b Consume the API behind Envoy and Authorino","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 404 Not Found\n# x-ext-auth-reason: Service not found\n
Authorino does not know about the talker-api.127.0.0.1.nip.io
host, hence the 404 Not Found
. Let's teach Authorino about this host by applying an AuthConfig
.
"},{"location":"authorino/docs/user-guides/hello-world/#apply-the-authconfig","title":"\u277c Apply the AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authconfig.yaml\n# authconfig.authorino.kuadrant.io/talker-api-protection created\n
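For reference, the manifest applied above declares an API key authentication scheme. A minimal sketch of what it may contain, inferred from the 401 challenge shown in the next step (the actual file in authorino-examples may differ):
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"api-clients\":\n apiKey:\n selector:\n matchLabels:\n group: friends # assumed label selector\n credentials:\n authorizationHeader:\n prefix: APIKEY\n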
"},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-without-credentials","title":"\u277d Consume the API without credentials","text":"curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-clients\"\n# x-ext-auth-reason: credential not found\n
"},{"location":"authorino/docs/user-guides/hello-world/#grant-access-to-the-api-with-a-tailor-made-security-scheme","title":"Grant access to the API with a tailor-made security scheme","text":"Check out other user guides for several use-cases of authentication and authorization, and the instructions to implement them using Authorino.
A few examples of available user guides:
- Authentication with API keys
- Authentication with JWTs and OpenID Connect Discovery
- Authentication with Kubernetes tokens (TokenReview API)
- Authorization with Open Policy Agent (OPA) Rego policies
- Authorization with simple JSON pattern-matching rules (e.g. JWT claims)
- Authorization with Kubernetes RBAC (SubjectAccessReview API)
- Fetching auth metadata from external sources
- Token normalization
"},{"location":"authorino/docs/user-guides/hello-world/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the namespaces created in steps 1 and 5:
kubectl delete namespace hello-world\nkubectl delete namespace authorino-operator\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/host-override/","title":"Host override via context extension","text":"By default, Authorino uses the host information of the HTTP request (Attributes.Http.Host
) to look up an indexed AuthConfig to be enforced1. The host info can be overridden by supplying a host
entry as a (per-route) context extension (Attributes.ContextExtensions
), which takes precedence whenever present.
Overriding the host attribute of the HTTP request can be useful to support use cases such as path prefix-based lookup and wildcard subdomain lookup.
\u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. In this guide:
- Example of host override for path prefix-based lookup
- Example of host override for wildcard subdomain lookup
"},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-path-prefix-based-lookup","title":"Example of host override for path prefix-based lookup","text":"In this use case, 2 different APIs (i.e. Dogs API and Cats API) are served under the same base domain, and differentiated by the path prefix:
pets.com/dogs
\u2192 Dogs API pets.com/cats
\u2192 Cats API
Edit the Envoy config to extend the external authorization settings at the level of the routes, with the host
value that will be favored by Authorino over the actual host attribute of the HTTP request:
virtual_hosts:\n\n- name: pets-api\n domains: ['pets.com']\n routes:\n - match:\n prefix: /dogs\n route:\n cluster: dogs-api\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: dogs.pets.com\n - match:\n prefix: /cats\n route:\n cluster: cats-api\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: cats.pets.com\n
Create the AuthConfig for the Pets API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: dogs-api-protection\nspec:\n hosts:\n\n - dogs.pets.com\n\n authentication: [...]\n
Create the AuthConfig for the Cats API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: cats-api-protection\nspec:\n hosts:\n\n - cats.pets.com\n\n authentication: [...]\n
Notice that the host subdomains dogs.pets.com
and cats.pets.com
are not really requested by the API consumers. Rather, users send requests to pets.com/dogs
and pets.com/cats
. When routing those requests, Envoy makes sure to inject the corresponding context extensions that will induce the right lookup in Authorino.
"},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-wildcard-subdomain-lookup","title":"Example of host override for wildcard subdomain lookup","text":"In this use case, a single Pets API serves requests for any subdomain that matches *.pets.com
, e.g.:
dogs.pets.com
\u2192 Pets API cats.pets.com
\u2192 Pets API
Edit the Envoy config to extend the external authorization settings at the level of the virtual host, with the host
value that will be favored by Authorino over the actual host attribute of the HTTP request:
virtual_hosts:\n\n- name: pets-api\n domains: ['*.pets.com']\n typed_per_filter_config:\n envoy.filters.http.ext_authz:\n \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n check_settings:\n context_extensions:\n host: pets.com\n routes:\n - match:\n prefix: /\n route:\n cluster: pets-api\n
The host
context extension used above can be any value that matches one of the hosts listed in the targeted AuthConfig.
Create the AuthConfig for the Pets API:
apiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: pets-api-protection\nspec:\n hosts:\n\n - pets.com\n\n authentication: [...]\n
Notice that requests to dogs.pets.com
and to cats.pets.com
are all routed by Envoy to the same API, with the same external authorization configuration. In all cases, Authorino will look up the indexed AuthConfig associated with pets.com
. The same is valid for a request sent, e.g., to birds.pets.com
.
-
For further details about Authorino lookup of AuthConfig, check out Host lookup.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/http-basic-authentication/","title":"User guide: HTTP \"Basic\" Authentication (RFC 7235)","text":"Turn Authorino API key Secret
s settings into HTTP basic auth.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 API key
- Authorization \u2192 Pattern-matching authorization
HTTP \"Basic\" Authentication (RFC 7235) is not recommended if you can afford other more secure methods such as OpenID Connect. To support legacy nonetheless it is sometimes necessary to implement it.
In Authorino, HTTP \"Basic\" Authentication can be modeled leveraging the API key authentication feature (stored as Kubernetes Secret
s with an api_key
entry and labeled to match selectors specified in spec.identity.apiKey.selector
of the AuthConfig
).
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The config uses API Key secrets to store base64-encoded username:password
HTTP \"Basic\" authentication credentials. The config also specifies an Access Control List (ACL) by which only user john
is authorized to consume the /bye
endpoint of the API.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"http-basic-auth\":\n apiKey:\n selector:\n matchLabels:\n group: users\n credentials:\n authorizationHeader:\n prefix: Basic\n authorization:\n \"acl\":\n when:\n - selector: context.request.http.path\n operator: eq\n value: /bye\n patternMatching:\n patterns:\n - selector: context.request.http.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\"}\n operator: eq\n value: john\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON, including the description of the string modifiers @extract
and @base64
used above. Check out as well the common feature Conditions about skipping parts of an AuthConfig
in the auth pipeline based on context.
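For intuition, the transformation that pattern applies to the Authorization header is roughly equivalent to this shell pipeline (using the sample credentials created in the next step):
# @extract:{\"pos\":1} -> take the token after \"Basic\"\n# @base64:decode -> decode to username:password\n# @extract:{\"sep\":\":\"} -> take the username before the first \":\"\nprintf 'Basic am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==' | awk '{print $2}' | base64 -d | cut -d: -f1\n# john\n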
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-user-credentials","title":"\u277b Create user credentials","text":"To create credentials for HTTP \"Basic\" Authentication, store each username:password
, base64-encoded, in the api_key
value of the Kubernetes Secret
resources. E.g.:
printf \"john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" | base64\n# am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==\n
Create credentials for user John:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: basic-auth-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA== # john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
Create credentials for user Jane:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: basic-auth-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: users\nstringData:\n api_key: amFuZTpkTnNScnNhcHkwbk5Dd210NTM3ZkhGcHl4MGNCc0xFcA== # jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#consume-the-api","title":"\u277c Consume the API","text":"As John (authorized in the ACL):
curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/bye\n# HTTP/1.1 200 OK\n
As Jane (NOT authorized in the ACL):
curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/bye -i\n# HTTP/1.1 403 Forbidden\n
With an invalid user/password:
curl -u unknown:invalid http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"http-basic-auth\"\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#revoke-access-to-the-api","title":"\u277d Revoke access to the API","text":"kubectl delete secret/basic-auth-1\n
"},{"location":"authorino/docs/user-guides/http-basic-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/basic-auth-1\nkubectl delete secret/basic-auth-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/injecting-data/","title":"User guide: Injecting data in the request","text":"Inject HTTP headers with serialized JSON content.
Authorino capabilities featured in this guide: - Dynamic response \u2192 JSON injection
- Identity verification & authentication \u2192 API key
Inject serialized custom JSON objects as HTTP request headers. Values can be static or fetched from the Authorization JSON.
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/injecting-data/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/injecting-data/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/injecting-data/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/injecting-data/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/injecting-data/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/injecting-data/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The following defines a JSON object to be injected as an added HTTP header into the request, named after the response config x-ext-auth-data
. The object includes 3 properties:
- a static value
authorized: true
; - a dynamic value
request-time
, from Envoy-supplied contextual data present in the Authorization JSON; and - a greeting message
greeting-message
that interpolates, into a static string, a dynamic value read from an annotation of the Kubernetes Secret
resource that represents the API key used to authenticate.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n response:\n success:\n headers:\n \"x-ext-auth-data\":\n json:\n properties:\n \"authorized\":\n value: true\n \"request-time\":\n selector: context.request.time.seconds\n \"greeting-message\":\n selector: Hello, {auth.identity.metadata.annotations.auth-data\\/name}!\nEOF\n
Check out the docs for information about the common feature JSON paths for reading from the Authorization JSON.
"},{"location":"authorino/docs/user-guides/injecting-data/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\n annotations:\n auth-data/name: Rita\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/injecting-data/#consume-the-api","title":"\u277c Consume the API","text":"curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# {\n# \"method\": \"GET\",\n# \"path\": \"/hello\",\n# \"query_string\": null,\n# \"body\": \"\",\n# \"headers\": {\n# \u2026\n# \"X-Ext-Auth-Data\": \"{\\\"authorized\\\":true,\\\"greeting-message\\\":\\\"Hello, Rita!\\\",\\\"request-time\\\":1637954644}\",\n# },\n# \u2026\n# }\n
"},{"location":"authorino/docs/user-guides/injecting-data/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/","title":"User guide: Simple pattern-matching authorization policies","text":"Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.
Authorino capabilities featured in this guide: - Authorization \u2192 Pattern-matching authorization
- Identity verification & authentication \u2192 JWT verification
Authorino provides a built-in authorization module to check simple pattern-matching rules against the Authorization JSON. This is an alternative to OPA when all you want is to check a few simple rules, without complex logic, such as matching the value of a JWT claim.
Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The email-verified-only
authorization policy ensures that users consuming the API from a given network (IP range 192.168.1.0/24) must have their emails verified.
The email_verified
claim is a property of the identity added to the JWT by the OpenID Connect issuer.
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"email-verified-only\":\n when:\n - selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\": \\\",\\\"}\"\n operator: matches\n value: 192\\\\.168\\\\.1\\\\.\\\\d+\n patternMatching:\n patterns:\n - selector: auth.identity.email_verified\n operator: eq\n value: \"true\"\nEOF\n
Check out the docs for information about semantics and operators supported by the JSON pattern-matching authorization feature, as well as the common feature JSON paths for reading from the Authorization JSON, including the description of the string modifier @extract
used above. Check out as well the common feature Conditions about skipping parts of an AuthConfig
in the auth pipeline based on context.
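For intuition, the @extract step in the when condition above keeps the first (client) address of a comma-separated X-Forwarded-For value, roughly equivalent to:
# @extract:{\"sep\": \",\"} keeps the first comma-separated element by default (pos 0)\nprintf '192.168.1.10, 10.0.0.1' | cut -d',' -f1\n# 192.168.1.10 - then matched against the regex 192\.168\.1\.\d+\n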
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-jane-email-verified","title":"Obtain an access token and consume the API as Jane (email verified)","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As Jane, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As Jane, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-peter-email-not-verified","title":"Obtain an access token and consume the API as Peter (email NOT verified)","text":"Obtain an access token with the Keycloak server for Peter:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Peter, consume the API outside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As Peter, consume the API inside the area where the policy applies:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
"},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete namespace keycloak\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/","title":"User guide: Authorization with Keycloak Authorization Services","text":"Keycloak provides a powerful set of tools (REST endpoints and administrative UIs), also known as Keycloak Authorization Services, to manage and enforce authorization, workflows for multiple access control mechanisms, including discretionary user access control and user-managed permissions.
This user guide is an example of how to use Authorino as an adapter to Keycloak Authorization Services while still relying on the reverse-proxy integration pattern, thus requiring neither importing an authorization library nor rebuilding the application's code.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Keycloak server
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Forward local requests to Keycloak running inside the cluster (if using Kind):
kubectl -n keycloak port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, Authorino will accept access tokens (JWTs) issued by the Keycloak server. These JWTs can be either normal Keycloak ID tokens or Requesting Party Tokens (RPT).
RPTs include claims about the permissions of the user regarding protected resources and scopes associated with a Keycloak authorization client that the user can access.
When the supplied access token is an RPT, Authorino will just validate whether the user's granted permissions present in the token include the requested resource ID (translated from the path) and scope (inferred from the HTTP method). If the token does not contain a permissions
claim (i.e. it is not an RPT), Authorino will negotiate a User-Managed Access (UMA) ticket on behalf of the user and try to obtain an RPT on that UMA ticket.
In cases of asynchronous user-managed permission control, the first request to the API using a normal Keycloak ID token is denied by Authorino. The user that owns the resource acknowledges the access request in the Keycloak UI. If access is granted, the new permissions will be reflected in subsequent RPTs obtained by Authorino on behalf of the requesting party.
Whenever an RPT with proper permissions is obtained by Authorino, the RPT is supplied back to the API consumer, so it can be used in subsequent requests, thus skipping new negotiations of UMA tickets.
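For illustration, the token exchange that Authorino automates can also be tried by hand. A sketch, assuming the in-cluster Keycloak host used elsewhere in this guide, an access token stored in $ACCESS_TOKEN, and talker-api as the resource server client; Keycloak's token endpoint accepts the uma-ticket grant type:
curl -s http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token \\\n -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n -d 'grant_type=urn:ietf:params:oauth:grant-type:uma-ticket' \\\n -d 'audience=talker-api' \\\n | jq -r .access_token\n# <RPT>\n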
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"uma\":\n opa:\n rego: |\n pat := http.send({\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\": \"post\",\"headers\":{\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":\"grant_type=client_credentials\"}).body.access_token\n resource_id := http.send({\"url\":concat(\"\",[\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/resource_set?uri=\",input.context.request.http.path]),\"method\":\"get\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat])}}).body[0]\n scope := lower(input.context.request.http.method)\n access_token := trim_prefix(input.context.request.http.headers.authorization, \"Bearer \")\n\n default rpt = \"\"\n rpt = access_token { object.get(input.auth.identity, \"authorization\", {}).permissions }\n else = rpt_str {\n ticket := http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/permission\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat]),\"Content-Type\":\"application/json\"},\"raw_body\":concat(\"\",[\"[{\\\"resource_id\\\":\\\"\",resource_id,\"\\\",\\\"resource_scopes\\\":[\\\"\",scope,\"\\\"]}]\"])}).body.ticket\n rpt_str := object.get(http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",access_token]),\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":concat(\"\",[\"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&ticket=\",ticket,\"&submit_request=true\"])}).body, \"access_token\", \"\")\n }\n\n allow {\n permissions := object.get(io.jwt.decode(rpt)[1], \"authorization\", { \"permissions\": [] }).permissions\n permissions[i]\n permissions[i].rsid = resource_id\n permissions[i].scopes[_] = scope\n }\n allValues: true\n response:\n success:\n headers:\n \"x-keycloak\":\n when:\n\n - selector: auth.identity.authorization.permissions\n operator: eq\n value: \"\"\n json:\n properties:\n \"rpt\":\n selector: auth.authorization.uma.rpt\nEOF\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for user Jane:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#consume-the-api","title":"\u277c Consume the API","text":"As Jane, try to send a GET
request to the protected resource /greetings/1
, owned by user John.
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n
As John, log in to http://localhost:8080/realms/kuadrant/account in the web browser (username: john
/ password: p
), and grant access to the resource greeting-1
for Jane. A pending permission request from Jane should appear in the list of John's Resources.
As Jane, try to consume the protected resource /greetings/1
again:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n#\n# {\u2026\n# \"headers\": {\u2026\n# \"X-Keycloak\": \"{\\\"rpt\\\":\\\"<RPT>\", \u2026\n
Copy the RPT from the response and repeat the request now using the RPT to authenticate:
curl -H \"Authorization: Bearer <RPT>\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/","title":"User guide: Kubernetes RBAC for service authorization (SubjectAccessReview API)","text":"Manage permissions in the Kubernetes RBAC and let Authorino to check them in request-time with the authorization system of the cluster.
Authorino capabilities featured in this guide: - Authorization \u2192 Kubernetes SubjectAccessReview
- Identity verification & authentication \u2192 Kubernetes TokenReview
Authorino can delegate authorization decision to the Kubernetes authorization system, allowing permissions to be stored and managed using the Kubernetes Role-Based Access Control (RBAC) for example. The feature is based on the SubjectAccessReview
API and can be used for resourceAttributes
(parameters defined in the AuthConfig
) or nonResourceAttributes
(inferring HTTP path and verb from the original request).
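For example, for a request POST /hello with no resourceAttributes configured, the SubjectAccessReview posted to the Kubernetes API looks roughly like this (the user name is an illustrative example):
apiVersion: authorization.k8s.io/v1\nkind: SubjectAccessReview\nspec:\n user: system:serviceaccount:default:api-consumer-1 # identity resolved by Authorino (example)\n nonResourceAttributes:\n path: /hello # inferred from the original request path\n verb: post # inferred from the HTTP method, lowercased\n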
Check out as well the user guide about Authentication with Kubernetes tokens (TokenReview API).
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create
TokenRequest
s (to consume the protected service from outside the cluster) - jq
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
The AuthConfig
below sets all Kubernetes service accounts as trusted users of the API and relies on Kubernetes RBAC to enforce authorization, using the Kubernetes SubjectAccessReview API for non-resource endpoints:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n - envoy.default.svc.cluster.local\n authentication:\n \"service-accounts\":\n kubernetesTokenReview:\n audiences: [\"https://kubernetes.default.svc.cluster.local\"]\n authorization:\n \"k8s-rbac\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.user.username\nEOF\n
Check out the spec for the Authorino Kubernetes SubjectAccessReview authorization feature, for permission checks on resource attributes, where the SubjectAccessReviews issued by Authorino are modeled in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb).
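A resource-attributes variant of the review shown earlier, issued manually against the Kubernetes API (a sketch; the attributes below are hypothetical and not part of this guide's AuthConfig):
echo '{ \"apiVersion\": \"authorization.k8s.io/v1\", \"kind\": \"SubjectAccessReview\", \"spec\": { \"user\": \"system:serviceaccount:default:api-consumer-1\", \"resourceAttributes\": { \"namespace\": \"default\", \"group\": \"apps\", \"resource\": \"deployments\", \"verb\": \"get\" } } }' | kubectl create --raw /apis/authorization.k8s.io/v1/subjectaccessreviews -f - | jq .status\n# { \"allowed\": false, \"reason\": \"...\" }\n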
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-roles-associated-with-endpoints-of-the-api","title":"\u277b Create roles associated with endpoints of the API","text":"Because the k8s-rbac
policy defined in the AuthConfig
in the previous step is for non-resource access review requests, the corresponding roles and role bindings have to be defined at cluster scope.
Create a talker-api-greeter
role, whose bound users and service accounts can consume the non-resource endpoints POST /hello
and POST /hi
of the API:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: talker-api-greeter\nrules:\n\n- nonResourceURLs: [\"/hello\"]\n verbs: [\"post\"]\n- nonResourceURLs: [\"/hi\"]\n verbs: [\"post\"]\nEOF\n
Create a talker-api-speaker
role, whose bound users and service accounts can consume the non-resource endpoints POST /say/*
of the API:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: talker-api-speaker\nrules:\n\n- nonResourceURLs: [\"/say/*\"]\n verbs: [\"post\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-the-serviceaccounts-and-permissions-to-consume-the-api","title":"\u277c Create the ServiceAccount
s and permissions to consume the API","text":"Create service accounts api-consumer-1
and api-consumer-2
:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-1\nEOF\n
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-2\nEOF\n
Bind both service accounts to the talker-api-greeter
role:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: talker-api-greeter-rolebinding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: talker-api-greeter\nsubjects:\n\n- kind: ServiceAccount\n name: api-consumer-1\n namespace: default\n- kind: ServiceAccount\n name: api-consumer-2\n namespace: default\nEOF\n
Bind service account api-consumer-1
to the talker-api-speaker
role:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: talker-api-speaker-rolebinding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: talker-api-speaker\nsubjects:\n\n- kind: ServiceAccount\n name: api-consumer-1\n namespace: default\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#consume-the-api","title":"\u277d Consume the API","text":"Run a pod that consumes one of the greeting endpoints of the API from inside the cluster, as service account api-consumer-1
, bound to the talker-api-greeter
and talker-api-speaker
cluster roles in the Kubernetes RBAC:
kubectl run greeter --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/hi\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-1\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 200\n
Run a pod that sends a POST
request to /say/blah
from within the cluster, as service account api-consumer-1
:
kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-1\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 200\n
Run a pod that sends a POST
request to /say/blah
from within the cluster, as service account api-consumer-2
, bound only to the talker-api-greeter
cluster role in the Kubernetes RBAC:
kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n \"apiVersion\": \"v1\",\n \"spec\": {\n \"containers\": [{\n \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n }],\n \"serviceAccountName\": \"api-consumer-2\",\n \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n }\n}' -- sh\n# Sending...\n# 403\n
Extra: consume the API as service account api-consumer-2
from outside the cluster. Obtain a short-lived access token for service account api-consumer-2
, bound to the talker-api-greeter
cluster role in the Kubernetes RBAC, using the Kubernetes TokenRequest API:
export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-2/token -f - | jq -r .status.token)\n
Consume the API as api-consumer-2
from outside the cluster:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/say/something -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete serviceaccount/api-consumer-1\nkubectl delete serviceaccount/api-consumer-2\nkubectl delete clusterrolebinding/talker-api-greeter-rolebinding\nkubectl delete clusterrolebinding/talker-api-speaker-rolebinding\nkubectl delete clusterrole/talker-api-greeter\nkubectl delete clusterrole/talker-api-speaker\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/","title":"User guide: Authentication with Kubernetes tokens (TokenReview API)","text":"Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Kubernetes TokenReview
Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).
These tokens can be either ServiceAccount
tokens or any valid user access tokens issued to users of the Kubernetes API server.
The audiences
claim of the token must include the requested host and port of the protected API (default), or all audiences specified in spec.authentication.<name>.kubernetesTokenReview.audiences
of the AuthConfig
.
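To inspect what Authorino gets back from the TokenReview API, you can issue the same review manually. A sketch, assuming a valid token stored in the ACCESS_TOKEN variable (as obtained later in this guide); the response shown is illustrative:
echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenReview\", \"spec\": { \"token\": \"'$ACCESS_TOKEN'\", \"audiences\": [\"talker-api\"] } }' | kubectl create --raw /apis/authentication.k8s.io/v1/tokenreviews -f - | jq .status\n# { \"authenticated\": true, \"user\": { \"username\": \"system:serviceaccount:default:api-consumer-1\", ... }, \"audiences\": [\"talker-api\"] }\n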
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create
TokenRequest
s (to consume the protected service from outside the cluster) - jq
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n - envoy.default.svc.cluster.local\n authentication:\n \"authorized-service-accounts\":\n kubernetesTokenReview:\n audiences:\n - talker-api\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-protected-by-authorino","title":"\u277b Consume the API protected by Authorino","text":""},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-a-serviceaccount","title":"Create a ServiceAccount
","text":"Create a Kubernetes ServiceAccount
to identify the consumer application that will send requests to the protected API:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: api-consumer-1\nEOF\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-outside-the-cluster","title":"Consume the API from outside the cluster","text":"Obtain a short-lived access token for the api-consumer-1
service account:
export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"audiences\": [\"talker-api\"], \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-1/token -f - | jq -r .status.token)\n
Consume the API with a valid Kubernetes token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
Try to consume the API again after the Kubernetes token has expired (10 minutes):
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"authorized-service-accounts\"\n# x-ext-auth-reason: Not authenticated\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-inside-the-cluster","title":"Consume the API from inside the cluster","text":"Deploy an application that consumes an endpoint of the Talker API, in a loop, every 10 seconds. The application uses a short-lived service account token mounted inside the container using Kubernetes Service Account Token Volume Projection to authenticate.
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Pod\nmetadata:\n name: api-consumer\nspec:\n containers:\n\n - name: api-consumer\n image: quay.io/kuadrant/authorino-examples:api-consumer\n command: [\"./run\"]\n args:\n - --endpoint=http://envoy.default.svc.cluster.local:8000/hello\n - --token-path=/var/run/secrets/tokens/api-token\n - --interval=10\n volumeMounts:\n - mountPath: /var/run/secrets/tokens\n name: talker-api-access-token\n serviceAccountName: api-consumer-1\n volumes:\n - name: talker-api-access-token\n projected:\n sources:\n - serviceAccountToken:\n path: api-token\n expirationSeconds: 7200\n audience: talker-api\nEOF\n
Check the logs of api-consumer
:
kubectl logs -f api-consumer\n# Sending...\n# 200\n# 200\n# 200\n# 200\n# ...\n
"},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete pod/api-consumer\nkubectl delete serviceaccount/api-consumer-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/mtls-authentication/","title":"User guide: Authentication with X.509 certificates and Mutual Transport Layer Security (mTLS)","text":"Verify client X.509 certificates against trusted root CAs stored in Kubernetes Secret
s to authenticate access to APIs protected with Authorino.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 X.509 client certificate authentication
- Authorization \u2192 Pattern-matching authorization
Authorino can verify X.509 certificates presented by clients for authentication of requests to the protected APIs, at the application level.
Trusted root Certificate Authorities (CAs) are stored as Kubernetes kubernetes.io/tls
Secrets labeled according to selectors specified in the AuthConfig, watched and cached by Authorino.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/mtls-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.
At step \u277b, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/mtls-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following commands will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS enabled3.
Create the TLS certificates for the Authorino service:
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/default/g\" | kubectl apply -f -\n
Request the Authorino instance:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#create-a-ca","title":"\u2779 Create a CA","text":"Create a CA (Certificate Authority) certificate to issue the client certificates that will be used to authenticate clients that send requests to the Talker API:
openssl req -x509 -sha512 -nodes \\\n -days 365 \\\n -newkey rsa:4096 \\\n -subj \"/CN=talker-api-ca\" \\\n -addext basicConstraints=CA:TRUE \\\n -addext keyUsage=digitalSignature,keyCertSign \\\n -keyout /tmp/ca.key \\\n -out /tmp/ca.crt\n
Store the CA cert in a Kubernetes Secret
, labeled to be discovered by Authorino and to be mounted in the file system of the Envoy container:
kubectl create secret tls talker-api-ca --cert=/tmp/ca.crt --key=/tmp/ca.key\nkubectl label secret talker-api-ca authorino.kuadrant.io/managed-by=authorino app=talker-api\n
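Authorino will discover the CA secret through the labels that the AuthConfig created in step \u277b selects on. You can preview which secrets match (output illustrative):
kubectl get secrets -l app=talker-api,authorino.kuadrant.io/managed-by=authorino\n# NAME            TYPE                DATA   AGE\n# talker-api-ca   kubernetes.io/tls   2      1m\n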
Prepare an extension file for the client certificate signing requests:
cat > /tmp/x509v3.ext << EOF\nauthorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nkeyUsage=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment\nextendedKeyUsage=clientAuth\nEOF\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#setup-envoy","title":"\u277a Setup Envoy","text":"The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: envoy\n name: envoy\ndata:\n envoy.yaml: |\n static_resources:\n listeners:\n\n - address:\n socket_address:\n address: 0.0.0.0\n port_value: 8443\n filter_chains:\n - transport_socket:\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n common_tls_context:\n tls_certificates:\n - certificate_chain: {filename: \"/etc/ssl/certs/talker-api/tls.crt\"}\n private_key: {filename: \"/etc/ssl/certs/talker-api/tls.key\"}\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/talker-api/tls.crt\n filters:\n - name: envoy.http_connection_manager\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n stat_prefix: local\n route_config:\n name: local_route\n virtual_hosts:\n - name: local_service\n domains: ['*']\n routes:\n - match: { prefix: / }\n route: { cluster: talker-api }\n http_filters:\n - name: envoy.filters.http.ext_authz\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n transport_api_version: V3\n failure_mode_allow: false\n include_peer_certificate: true\n grpc_service:\n envoy_grpc: { cluster_name: authorino }\n timeout: 1s\n - name: envoy.filters.http.router\n typed_config: {}\n use_remote_address: true\n clusters:\n - name: authorino\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n http2_protocol_options: {}\n load_assignment:\n cluster_name: authorino\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: authorino-authorino-authorization\n port_value: 50051\n transport_socket:\n name: envoy.transport_sockets.tls\n typed_config:\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n common_tls_context:\n validation_context:\n trusted_ca:\n filename: /etc/ssl/certs/authorino-ca-cert.crt\n - name: talker-api\n connect_timeout: 0.25s\n type: strict_dns\n lb_policy: round_robin\n load_assignment:\n cluster_name: talker-api\n endpoints:\n - lb_endpoints:\n - endpoint:\n address:\n socket_address:\n address: talker-api\n port_value: 3000\n admin:\n access_log_path: \"/tmp/admin_access.log\"\n address:\n socket_address:\n address: 0.0.0.0\n port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: envoy\n name: envoy\nspec:\n selector:\n matchLabels:\n app: envoy\n template:\n metadata:\n labels:\n app: envoy\n spec:\n containers:\n - args:\n - --config-path /usr/local/etc/envoy/envoy.yaml\n - --service-cluster front-proxy\n - --log-level info\n - --component-log-level filter:trace,http:debug,router:debug\n command:\n - /usr/local/bin/envoy\n image: envoyproxy/envoy:v1.19-latest\n name: envoy\n ports:\n - containerPort: 8443\n name: web\n - containerPort: 8001\n name: admin\n volumeMounts:\n - mountPath: /usr/local/etc/envoy\n name: config\n readOnly: true\n - mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n name: authorino-ca-cert\n readOnly: true\n subPath: ca.crt\n - mountPath: /etc/ssl/certs/talker-api\n name: talker-api-ca\n readOnly: true\n volumes:\n - configMap:\n items:\n - key: envoy.yaml\n path: envoy.yaml\n name: envoy\n name: config\n - name: authorino-ca-cert\n secret:\n defaultMode: 420\n secretName: authorino-ca-cert\n - name: talker-api-ca\n secret:\n defaultMode: 420\n secretName: talker-api-ca\n---\napiVersion: v1\nkind: 
Service\nmetadata:\n name: envoy\nspec:\n selector:\n app: envoy\n ports:\n - name: web\n port: 8443\n protocol: TCP\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n name: ingress-wildcard-host\nspec:\n rules:\n - host: talker-api.127.0.0.1.nip.io\n http:\n paths:\n - backend:\n service:\n name: envoy\n port: { number: 8443 }\n path: /\n pathType: Prefix\nEOF\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8443 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8443:8443 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#create-the-authconfig","title":"\u277b Create the AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"mtls\":\n x509:\n selector:\n matchLabels:\n app: talker-api\n authorization:\n \"acme\":\n patternMatching:\n patterns:\n - selector: auth.identity.Organization\n operator: incl\n value: ACME Inc.\nEOF\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a TLS certificate signed by the trusted CA:
openssl genrsa -out /tmp/aisha.key 4096\nopenssl req -new -subj \"/CN=aisha/C=PK/L=Islamabad/O=ACME Inc./OU=Engineering\" -key /tmp/aisha.key -out /tmp/aisha.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt\n\ncurl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 200 OK\n
With a TLS certificate signed by the trusted CA, though missing an authorized Organization:
openssl genrsa -out /tmp/john.key 4096\nopenssl req -new -subj \"/CN=john/C=UK/L=London\" -key /tmp/john.key -out /tmp/john.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt\n\ncurl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
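The acme authorization policy grants access based on the Organization (O) attribute of the certificate subject. Compare the two client certificates to see why one passes and the other does not (output format may vary with the OpenSSL version):
openssl x509 -in /tmp/aisha.crt -noout -subject\n# subject=CN = aisha, C = PK, L = Islamabad, O = ACME Inc., OU = Engineering\n\nopenssl x509 -in /tmp/john.crt -noout -subject\n# subject=CN = john, C = UK, L = London\n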
"},{"location":"authorino/docs/user-guides/mtls-authentication/#try-the-authconfig-via-raw-http-authorization-interface","title":"\u277d Try the AuthConfig via raw HTTP authorization interface","text":"Expose Authorino's raw HTTP authorization to the local host:
kubectl port-forward service/authorino-authorino-authorization 5001:5001 2>&1 >/dev/null &\n
With a TLS certificate signed by the trusted CA:
curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 200\n
With a TLS certificate signed by an unknown authority:
openssl req -x509 -sha512 -nodes \\\n -days 365 \\\n -newkey rsa:4096 \\\n -subj \"/CN=untrusted\" \\\n -addext basicConstraints=CA:TRUE \\\n -addext keyUsage=digitalSignature,keyCertSign \\\n -keyout /tmp/untrusted-ca.key \\\n -out /tmp/untrusted-ca.crt\n\nopenssl genrsa -out /tmp/niko.key 4096\nopenssl req -new -subj \"/CN=niko/C=JP/L=Osaka\" -key /tmp/niko.key -out /tmp/niko.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt\n\ncurl -k --cert /tmp/niko.crt --key /tmp/niko.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 401\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
"},{"location":"authorino/docs/user-guides/mtls-authentication/#revoke-an-entire-chain-of-certificates","title":"\u277e Revoke an entire chain of certificates","text":"kubectl delete secret/talker-api-ca\n
Even if the deleted root certificate is still cached and accepted at the gateway, Authorino will revoke access at the application level immediately.
Try with a previously accepted certificate:
curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
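To restore access for clients of the revoked chain, recreate and relabel the CA secret with the same commands used in step \u2779:
kubectl create secret tls talker-api-ca --cert=/tmp/ca.crt --key=/tmp/ca.key\nkubectl label secret talker-api-ca authorino.kuadrant.io/managed-by=authorino app=talker-api\n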
"},{"location":"authorino/docs/user-guides/mtls-authentication/#cleanup","title":"Cleanup","text":"kind delete cluster --name authorino-tutorial\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/","title":"User guide: OAuth 2.0 token introspection (RFC 7662)","text":"Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 OAuth 2.0 introspection
- Authorization \u2192 Pattern-matching authorization
Authorino can perform OAuth 2.0 token introspection (RFC 7662) on the access tokens supplied in the requests to protected APIs. This is particularly useful when using opaque tokens, to check token validity remotely and resolve the identity object.
Important! Authorino does not implement OAuth2 grants or OIDC authentication flows. As a general good practice, obtaining and refreshing access tokens should be negotiated by clients directly with the auth servers and token issuers. Authorino only validates those tokens, using the parameters provided by the trusted issuer authorities.
Also check out the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- OAuth 2.0 server that implements the token introspection endpoint (RFC 7662) (e.g. Keycloak or a12n-server)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy a Keycloak server preloaded with the realm settings required for this guide:
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
Deploy an a12n-server server preloaded with all settings required for this guide:
kubectl create namespace a12n-server\nkubectl -n a12n-server apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create the required secrets that will be used by Authorino to authenticate with Keycloak and a12n-server during the introspection request:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: oauth2-token-introspection-credentials-keycloak\nstringData:\n clientID: talker-api\n clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: oauth2-token-introspection-credentials-a12n-server\nstringData:\n clientID: talker-api\n clientSecret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g\ntype: Opaque\nEOF\n
Create the Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak\":\n oauth2Introspection:\n endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token/introspect\n tokenTypeHint: requesting_party_token\n credentialsRef:\n name: oauth2-token-introspection-credentials-keycloak\n \"a12n-server\":\n oauth2Introspection:\n endpoint: http://a12n-server.a12n-server.svc.cluster.local:8531/introspect\n credentialsRef:\n name: oauth2-token-introspection-credentials-a12n-server\n authorization:\n \"can-read\":\n when:\n - selector: auth.identity.privileges\n operator: neq\n value: \"\"\n patternMatching:\n patterns:\n - selector: auth.identity.privileges.talker-api\n operator: incl\n value: read\nEOF\n
On every request, Authorino will try to verify the token remotely with both the Keycloak server and the a12n-server instance.
For authorization, whenever the introspected token data includes a privileges
property (returned by a12n-server), Authorino will enforce that only consumers whose privileges.talker-api
includes the \"read\"
permission are granted access.
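To see the token data the policy inspects, you can call the introspection endpoint directly (RFC 7662), authenticating with the client credentials stored in the secret above. A sketch against the a12n-server instance, once you have an access token and the server is reachable locally (both are set up in the next step); the response shape shown is illustrative:
curl -s -d \"token=$ACCESS_TOKEN\" -u talker-api:V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g http://localhost:8531/introspect | jq .\n# {\n#   \"active\": true,\n#   \"privileges\": { \"talker-api\": [\"read\"] },\n#   ...\n# }\n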
Check out the docs for information about the common feature Conditions, used for skipping parts of an AuthConfig
in the auth pipeline based on context.
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-keycloak-and-consume-the-api","title":"Obtain an access token with Keycloak and consume the API","text":"Obtain an access token with the Keycloak server for user Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As user Jane, consume the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-a12n-server-and-consume-the-api","title":"Obtain an access token with a12n-server and consume the API","text":"Forward local requests to a12n-server instance running in the cluster:
kubectl -n a12n-server port-forward deployment/a12n-server 8531:8531 2>&1 >/dev/null &\n
Obtain an access token with the a12n-server server for service account service-account-1
:
ACCESS_TOKEN=$(curl -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/token\" | jq -r .access_token)\n
You can also obtain an access token from within the cluster, in case your a12n-server is not reachable from outside:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://a12n-server.a12n-server.svc.cluster.local:8531/token -s -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s | jq -r .access_token)\n
Verify the issued token is an opaque access token in this case:
echo $ACCESS_TOKEN\n
As service-account-1
, consume the API with a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
curl -d \"token=$ACCESS_TOKEN\" -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/revoke\" -i\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#consume-the-api-with-a-missing-or-invalid-access-token","title":"Consume the API with a missing or invalid access token","text":"curl -H \"Authorization: Bearer invalid\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
"},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete secret/oauth2-token-introspection-credentials-keycloak\nkubectl delete secret/oauth2-token-introspection-credentials-a12n-server\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\nkubectl delete namespace a12n-server\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/observability/","title":"Observability","text":""},{"location":"authorino/docs/user-guides/observability/#metrics","title":"Metrics","text":"Authorino exports metrics at 2 endpoints:
- /metrics: Metrics of the controller-runtime about reconciliation (caching) of AuthConfigs and API key Secrets
- /server-metrics: Metrics of the external authorization gRPC and OIDC/Festival Wristband validation built-in HTTP servers
The Authorino Operator creates a Kubernetes Service
named <authorino-cr-name>-controller-metrics
that exposes the endpoints on port 8080. The port number of the metrics endpoints can be modified by setting the --metrics-addr
command-line flag (default: :8080
).
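For example, assuming an Authorino CR named authorino as in the user guides, the endpoints can be reached locally through a port-forward to the metrics service:
kubectl port-forward service/authorino-controller-metrics 8080:8080 2>&1 >/dev/null &\n\ncurl http://localhost:8080/metrics\n\ncurl http://localhost:8080/server-metrics\n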
Main metrics exported by endpoint1:
Endpoint: /metrics

- controller_runtime_reconcile_total (counter): Total number of reconciliations per controller. Labels: controller=authconfig|secret, result=success|error|requeue
- controller_runtime_reconcile_errors_total (counter): Total number of reconciliation errors per controller. Labels: controller=authconfig|secret
- controller_runtime_reconcile_time_seconds (histogram): Length of time per reconciliation per controller. Labels: controller=authconfig|secret
- controller_runtime_max_concurrent_reconciles (gauge): Maximum number of concurrent reconciles per controller. Labels: controller=authconfig|secret
- workqueue_adds_total (counter): Total number of adds handled by workqueue. Labels: name=authconfig|secret
- workqueue_depth (gauge): Current depth of workqueue. Labels: name=authconfig|secret
- workqueue_queue_duration_seconds (histogram): How long in seconds an item stays in workqueue before being requested. Labels: name=authconfig|secret
- workqueue_longest_running_processor_seconds (gauge): How many seconds has the longest running processor for workqueue been running. Labels: name=authconfig|secret
- workqueue_retries_total (counter): Total number of retries handled by workqueue. Labels: name=authconfig|secret
- workqueue_unfinished_work_seconds (gauge): How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Labels: name=authconfig|secret
- workqueue_work_duration_seconds (histogram): How long in seconds processing an item from workqueue takes. Labels: name=authconfig|secret
- rest_client_requests_total (counter): Number of HTTP requests, partitioned by status code, method, and host. Labels: code=200|404, method=GET|PUT|POST

Endpoint: /server-metrics

- auth_server_evaluator_total2 (counter): Total number of evaluations of individual authconfig rule performed by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_cancelled2 (counter): Number of evaluations of individual authconfig rule cancelled by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_ignored2 (counter): Number of evaluations of individual authconfig rule ignored by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_denied2 (counter): Number of denials from individual authconfig rule evaluated by the auth server. Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_evaluator_duration_seconds2 (histogram): Response latency of individual authconfig rule evaluated by the auth server (in seconds). Labels: namespace, authconfig, evaluator_type, evaluator_name
- auth_server_authconfig_total (counter): Total number of authconfigs enforced by the auth server, partitioned by authconfig. Labels: namespace, authconfig
- auth_server_authconfig_response_status (counter): Response status of authconfigs sent by the auth server, partitioned by authconfig. Labels: namespace, authconfig, status=OK|UNAUTHENTICATED|PERMISSION_DENIED
- auth_server_authconfig_duration_seconds (histogram): Response latency of authconfig enforced by the auth server (in seconds). Labels: namespace, authconfig
- auth_server_response_status (counter): Response status of authconfigs sent by the auth server. Labels: status=OK|UNAUTHENTICATED|PERMISSION_DENIED|NOT_FOUND
- grpc_server_handled_total (counter): Total number of RPCs completed on the server, regardless of success or failure. Labels: grpc_code=OK|Aborted|Canceled|DeadlineExceeded|Internal|ResourceExhausted|Unknown, grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_handling_seconds (histogram): Response latency (seconds) of gRPC that had been application-level handled by the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_msg_received_total (counter): Total number of RPC stream messages received on the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_msg_sent_total (counter): Total number of gRPC stream messages sent by the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- grpc_server_started_total (counter): Total number of RPCs started on the server. Labels: grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization
- http_server_handled_total (counter): Total number of calls completed on the raw HTTP authorization server, regardless of success or failure. Labels: http_code
- http_server_handling_seconds (histogram): Response latency (seconds) of raw HTTP authorization request that had been application-level handled by the server.
- oidc_server_requests_total (counter): Number of get requests received on the OIDC (Festival Wristband) server. Labels: namespace, authconfig, wristband, path=oidc-config|jwks
- oidc_server_response_status (counter): Status of HTTP response sent by the OIDC (Festival Wristband) server. Labels: status=200|404

1 Both endpoints export metrics about the Go runtime, such as number of goroutines (go_goroutines) and threads (go_threads), usage of CPU, memory and GC stats.
2 Opt-in metrics: auth_server_evaluator_*
metrics require authconfig.spec.(identity|metadata|authorization|response).metrics: true
(default: false
). This can be enforced for the entire instance (all AuthConfigs and evaluators), by setting the --deep-metrics-enabled
command-line flag in the Authorino deployment.
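As a sketch of the per-evaluator opt-in, assuming the v1beta2 AuthConfig schema used in the user guides and the authorized-service-accounts authentication rule from the TokenReview guide (a JSON merge patch; adapt the evaluator name to your AuthConfig):
kubectl patch authconfig/talker-api-protection --type=merge -p '{\"spec\":{\"authentication\":{\"authorized-service-accounts\":{\"metrics\":true}}}}'\n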
Example of metrics exported at the /metrics endpoint

# HELP controller_runtime_active_workers Number of currently used workers per controller
# TYPE controller_runtime_active_workers gauge
controller_runtime_active_workers{controller="authconfig"} 0
controller_runtime_active_workers{controller="secret"} 0
# HELP controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller
# TYPE controller_runtime_max_concurrent_reconciles gauge
controller_runtime_max_concurrent_reconciles{controller="authconfig"} 1
controller_runtime_max_concurrent_reconciles{controller="secret"} 1
# HELP controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller
# TYPE controller_runtime_reconcile_errors_total counter
controller_runtime_reconcile_errors_total{controller="authconfig"} 12
controller_runtime_reconcile_errors_total{controller="secret"} 0
# HELP controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller
# TYPE controller_runtime_reconcile_time_seconds histogram
controller_runtime_reconcile_time_seconds_bucket{controller="authconfig",le="0.005"} 1
controller_runtime_reconcile_time_seconds_bucket{controller="authconfig",le="0.01"} 11
controller_runtime_reconcile_time_seconds_bucket{controller="authconfig",le="0.025"} 17
controller_runtime_reconcile_time_seconds_bucket{controller="authconfig",le="0.05"} 18
[...]
controller_runtime_reconcile_time_seconds_bucket{controller="authconfig",le="+Inf"} 19
controller_runtime_reconcile_time_seconds_sum{controller="authconfig"} 5.171108321999999
controller_runtime_reconcile_time_seconds_count{controller="authconfig"} 19
controller_runtime_reconcile_time_seconds_bucket{controller="secret",le="0.005"} 1
[...]
controller_runtime_reconcile_time_seconds_bucket{controller="secret",le="+Inf"} 1
controller_runtime_reconcile_time_seconds_sum{controller="secret"} 0.000138025
controller_runtime_reconcile_time_seconds_count{controller="secret"} 1
# HELP controller_runtime_reconcile_total Total number of reconciliations per controller
# TYPE controller_runtime_reconcile_total counter
controller_runtime_reconcile_total{controller="authconfig",result="error"} 12
controller_runtime_reconcile_total{controller="authconfig",result="requeue"} 0
controller_runtime_reconcile_total{controller="authconfig",result="requeue_after"} 0
controller_runtime_reconcile_total{controller="authconfig",result="success"} 7
controller_runtime_reconcile_total{controller="secret",result="error"} 0
controller_runtime_reconcile_total{controller="secret",result="requeue"} 0
controller_runtime_reconcile_total{controller="secret",result="requeue_after"} 0
controller_runtime_reconcile_total{controller="secret",result="success"} 1
# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.
# TYPE go_gc_cycles_automatic_gc_cycles_total counter
go_gc_cycles_automatic_gc_cycles_total 13
# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.
# TYPE go_gc_cycles_forced_gc_cycles_total counter
go_gc_cycles_forced_gc_cycles_total 0
# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.
# TYPE go_gc_cycles_total_gc_cycles_total counter
go_gc_cycles_total_gc_cycles_total 13
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 4.5971e-05
go_gc_duration_seconds{quantile="0.25"} 5.69e-05
go_gc_duration_seconds{quantile="0.5"} 0.000140699
go_gc_duration_seconds{quantile="0.75"} 0.000313162
go_gc_duration_seconds{quantile="1"} 0.001692423
go_gc_duration_seconds_sum 0.003671076
go_gc_duration_seconds_count 13
[...]
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 80
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.18.7"} 1
[...]
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 1.84
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 14
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 4.3728896e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.64615612779e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.65362176e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.
# TYPE rest_client_requests_total counter
rest_client_requests_total{code="200",host="10.96.0.1:443",method="GET"} 114
rest_client_requests_total{code="200",host="10.96.0.1:443",method="PUT"} 4
# HELP workqueue_adds_total Total number of adds handled by workqueue
# TYPE workqueue_adds_total counter
workqueue_adds_total{name="authconfig"} 19
workqueue_adds_total{name="secret"} 1
# HELP workqueue_depth Current depth of workqueue
# TYPE workqueue_depth gauge
workqueue_depth{name="authconfig"} 0
workqueue_depth{name="secret"} 0
# HELP workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running.
# TYPE workqueue_longest_running_processor_seconds gauge
workqueue_longest_running_processor_seconds{name="authconfig"} 0
workqueue_longest_running_processor_seconds{name="secret"} 0
# HELP workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested
# TYPE workqueue_queue_duration_seconds histogram
workqueue_queue_duration_seconds_bucket{name="authconfig",le="1e-08"} 0
[...]
workqueue_queue_duration_seconds_bucket{name="authconfig",le="+Inf"} 19
workqueue_queue_duration_seconds_sum{name="authconfig"} 4.969016371
workqueue_queue_duration_seconds_count{name="authconfig"} 19
workqueue_queue_duration_seconds_bucket{name="secret",le="1e-08"} 0
[...]
workqueue_queue_duration_seconds_bucket{name="secret",le="+Inf"} 1
workqueue_queue_duration_seconds_sum{name="secret"} 4.67e-06
workqueue_queue_duration_seconds_count{name="secret"} 1
# HELP workqueue_retries_total Total number of retries handled by workqueue
# TYPE workqueue_retries_total counter
workqueue_retries_total{name="authconfig"} 12
workqueue_retries_total{name="secret"} 0
# HELP workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases.
# TYPE workqueue_unfinished_work_seconds gauge
workqueue_unfinished_work_seconds{name="authconfig"} 0
workqueue_unfinished_work_seconds{name="secret"} 0
# HELP workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes.
# TYPE workqueue_work_duration_seconds histogram
workqueue_work_duration_seconds_bucket{name="authconfig",le="1e-08"} 0
[...]
workqueue_work_duration_seconds_bucket{name="authconfig",le="+Inf"} 19
workqueue_work_duration_seconds_sum{name="authconfig"} 5.171738079000001
workqueue_work_duration_seconds_count{name="authconfig"} 19
workqueue_work_duration_seconds_bucket{name="secret",le="1e-08"} 0
[...]
workqueue_work_duration_seconds_bucket{name="secret",le="+Inf"} 1
workqueue_work_duration_seconds_sum{name="secret"} 0.000150956
workqueue_work_duration_seconds_count{name="secret"} 1
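To eyeball either endpoint without a Prometheus server, the metrics can be fetched directly. A minimal sketch, assuming the metrics listener is exposed on port 8080 of a Deployment named authorino in the current namespace (adjust names and port to your setup):

# Forward the metrics port to localhost (hypothetical resource name and port)
kubectl port-forward deployment/authorino 8080:8080 2>&1 >/dev/null &

# Reconciliation (controller) metrics
curl -s http://localhost:8080/metrics

# Auth service metrics, including the deep metrics when enabled
curl -s http://localhost:8080/server-metrics

Once scraped by Prometheus, the histogram series shown in these examples lend themselves to percentile queries, e.g. histogram_quantile(0.95, sum by (le, authconfig) (rate(auth_server_authconfig_duration_seconds_bucket[5m]))) for a per-AuthConfig 95th-percentile response latency.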
Example of metrics exported at the /server-metrics endpoint

# HELP auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds).
# TYPE auth_server_authconfig_duration_seconds histogram
auth_server_authconfig_duration_seconds_bucket{authconfig="edge-auth",namespace="authorino",le="0.001"} 0
auth_server_authconfig_duration_seconds_bucket{authconfig="edge-auth",namespace="authorino",le="0.051000000000000004"} 1
[...]
auth_server_authconfig_duration_seconds_bucket{authconfig="edge-auth",namespace="authorino",le="+Inf"} 1
auth_server_authconfig_duration_seconds_sum{authconfig="edge-auth",namespace="authorino"} 0.001701795
auth_server_authconfig_duration_seconds_count{authconfig="edge-auth",namespace="authorino"} 1
auth_server_authconfig_duration_seconds_bucket{authconfig="talker-api-protection",namespace="authorino",le="0.001"} 1
auth_server_authconfig_duration_seconds_bucket{authconfig="talker-api-protection",namespace="authorino",le="0.051000000000000004"} 4
auth_server_authconfig_duration_seconds_bucket{authconfig="talker-api-protection",namespace="authorino",le="0.15100000000000002"} 5
[...]
auth_server_authconfig_duration_seconds_bucket{authconfig="talker-api-protection",namespace="authorino",le="+Inf"} 5
auth_server_authconfig_duration_seconds_sum{authconfig="talker-api-protection",namespace="authorino"} 0.26967658299999997
auth_server_authconfig_duration_seconds_count{authconfig="talker-api-protection",namespace="authorino"} 5
# HELP auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig.
# TYPE auth_server_authconfig_response_status counter
auth_server_authconfig_response_status{authconfig="edge-auth",namespace="authorino",status="OK"} 1
auth_server_authconfig_response_status{authconfig="talker-api-protection",namespace="authorino",status="OK"} 2
auth_server_authconfig_response_status{authconfig="talker-api-protection",namespace="authorino",status="PERMISSION_DENIED"} 2
auth_server_authconfig_response_status{authconfig="talker-api-protection",namespace="authorino",status="UNAUTHENTICATED"} 1
# HELP auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig.
# TYPE auth_server_authconfig_total counter
auth_server_authconfig_total{authconfig="edge-auth",namespace="authorino"} 1
auth_server_authconfig_total{authconfig="talker-api-protection",namespace="authorino"} 5
# HELP auth_server_evaluator_duration_seconds Response latency of individual authconfig rule evaluated by the auth server (in seconds).
# TYPE auth_server_evaluator_duration_seconds histogram
auth_server_evaluator_duration_seconds_bucket{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino",le="0.001"} 0
auth_server_evaluator_duration_seconds_bucket{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino",le="0.051000000000000004"} 3
auth_server_evaluator_duration_seconds_bucket{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino",le="0.15100000000000002"} 4
[...]
auth_server_evaluator_duration_seconds_bucket{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino",le="+Inf"} 4
auth_server_evaluator_duration_seconds_sum{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino"} 0.25800055
auth_server_evaluator_duration_seconds_count{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino"} 4
# HELP auth_server_evaluator_total Total number of evaluations of individual authconfig rule performed by the auth server.
# TYPE auth_server_evaluator_total counter
auth_server_evaluator_total{authconfig="talker-api-protection",evaluator_name="geo",evaluator_type="METADATA_GENERIC_HTTP",namespace="authorino"} 4
# HELP auth_server_response_status Response status of authconfigs sent by the auth server.
# TYPE auth_server_response_status counter
auth_server_response_status{status="NOT_FOUND"} 1
auth_server_response_status{status="OK"} 3
auth_server_response_status{status="PERMISSION_DENIED"} 2
auth_server_response_status{status="UNAUTHENTICATED"} 1
[...]
# HELP grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure.
# TYPE grpc_server_handled_total counter
grpc_server_handled_total{grpc_code="Aborted",grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 0
[...]
grpc_server_handled_total{grpc_code="OK",grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 7
grpc_server_handled_total{grpc_code="OK",grpc_method="Check",grpc_service="grpc.health.v1.Health",grpc_type="unary"} 0
grpc_server_handled_total{grpc_code="OK",grpc_method="Watch",grpc_service="grpc.health.v1.Health",grpc_type="server_stream"} 0
[...]
# HELP grpc_server_handling_seconds Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.
# TYPE grpc_server_handling_seconds histogram
grpc_server_handling_seconds_bucket{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary",le="0.005"} 3
grpc_server_handling_seconds_bucket{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary",le="0.05"} 6
grpc_server_handling_seconds_bucket{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary",le="0.25"} 7
[...]
grpc_server_handling_seconds_bucket{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary",le="+Inf"} 7
grpc_server_handling_seconds_sum{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 0.277605516
grpc_server_handling_seconds_count{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 7
# HELP grpc_server_msg_received_total Total number of RPC stream messages received on the server.
# TYPE grpc_server_msg_received_total counter
grpc_server_msg_received_total{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 7
grpc_server_msg_received_total{grpc_method="Check",grpc_service="grpc.health.v1.Health",grpc_type="unary"} 0
grpc_server_msg_received_total{grpc_method="Watch",grpc_service="grpc.health.v1.Health",grpc_type="server_stream"} 0
# HELP grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server.
# TYPE grpc_server_msg_sent_total counter
grpc_server_msg_sent_total{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 7
grpc_server_msg_sent_total{grpc_method="Check",grpc_service="grpc.health.v1.Health",grpc_type="unary"} 0
grpc_server_msg_sent_total{grpc_method="Watch",grpc_service="grpc.health.v1.Health",grpc_type="server_stream"} 0
# HELP grpc_server_started_total Total number of RPCs started on the server.
# TYPE grpc_server_started_total counter
grpc_server_started_total{grpc_method="Check",grpc_service="envoy.service.auth.v3.Authorization",grpc_type="unary"} 7
grpc_server_started_total{grpc_method="Check",grpc_service="grpc.health.v1.Health",grpc_type="unary"} 0
grpc_server_started_total{grpc_method="Watch",grpc_service="grpc.health.v1.Health",grpc_type="server_stream"} 0
# HELP oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server.
# TYPE oidc_server_requests_total counter
oidc_server_requests_total{authconfig="edge-auth",namespace="authorino",path="/.well-known/openid-configuration",wristband="wristband"} 1
oidc_server_requests_total{authconfig="edge-auth",namespace="authorino",path="/.well-known/openid-connect/certs",wristband="wristband"} 1
# HELP oidc_server_response_status
Status of HTTP response sent by the OIDC (Festival Wristband) server.\n# TYPE oidc_server_response_status counter\noidc_server_response_status{status=\"200\"} 2\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.42\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.370432e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n# TYPE promhttp_metric_handler_requests_in_flight gauge\npromhttp_metric_handler_requests_in_flight 1\n# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n# TYPE promhttp_metric_handler_requests_total counter\npromhttp_metric_handler_requests_total{code=\"200\"} 1\npromhttp_metric_handler_requests_total{code=\"500\"} 0\npromhttp_metric_handler_requests_total{code=\"503\"} 0\n
"},{"location":"authorino/docs/user-guides/observability/#readiness-check","title":"Readiness check","text":"Authorino exposes two main endpoints for health and readiness check of the AuthConfig controller:
/healthz
: Health probe (ping) \u2013 reports \"ok\" if the controller is healthy. /readyz
: Readiness probe \u2013 reports \"ok\" if the controller is ready to reconcile AuthConfig-related events.
In general, the endpoints return either 200 ("ok", i.e. all checks have passed) or 500 (when one or more checks have failed).
The default binding network address is :8081, which can be changed by setting the command-line flag --health-probe-addr.
The following additional subpath is available, and its corresponding check can be aggregated into the response from the main readiness probe:
- /readyz/authconfigs: Aggregated readiness status of the AuthConfigs – reports "ok" if all AuthConfigs watched by the reconciler have been marked as ready.
Important! The AuthConfig readiness check within the scope of the aggregated readiness probe endpoint is deactivated by default – i.e. it is an opt-in check. A request to the /readyz endpoint that does not explicitly opt in to the AuthConfigs check via the include parameter will get a response that disregards the actual status of the watched AuthConfigs (possibly an "ok" message). To read the aggregated status of the watched AuthConfigs, either use the specific endpoint /readyz/authconfigs or opt in to the check in the aggregated endpoint by sending a request to /readyz?include=authconfigs.
Apart from include, which adds the aggregated status of the AuthConfigs, the following additional query string parameters are available:
- verbose=true|false – provides more verbose response messages;
- exclude=(check name) – to exclude a particular readiness check (reserved for future use).
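For a quick sanity check of these probes, one can port-forward to the health probe port and query the endpoints directly. A minimal sketch, assuming an Authorino deployment named authorino in the current namespace and the default binding address :8081 – adjust the names to your setup:
kubectl port-forward deployment/authorino 8081:8081 2>&1 >/dev/null &

# Health probe – expected to return 200 ("ok") while the controller is healthy
curl http://localhost:8081/healthz

# Readiness probe, opting in to the AuthConfigs check, with verbose per-check output
curl "http://localhost:8081/readyz?include=authconfigs&verbose=true"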
"},{"location":"authorino/docs/user-guides/observability/#logging","title":"Logging","text":"Authorino provides structured log messages (\"production\") or more log messages output to stdout in a more user-friendly format (\"development\" mode) and different level of logging.
"},{"location":"authorino/docs/user-guides/observability/#log-levels-and-log-modes","title":"Log levels and log modes","text":"Authorino outputs 3 levels of log messages: (from lowest to highest level)
debug
info
(default) error
info
logging is restricted to high-level information of the gRPC and HTTP authorization services, limiting messages to incoming request and respective outgoing response logs, with reduced details about the corresponding objects (request payload and authorization result), and without any further detailed logs of the steps in between, except for errors.
Only debug
logging will include processing details of each Auth Pipeline, such as intermediary requests to validate identities with external auth servers, requests to external sources of auth metadata or authorization policies.
To configure the desired log level, set the spec.logLevel field of the Authorino custom resource (or the --log-level command-line flag in the Authorino deployment) to one of the supported values listed above. The default log level is info.
Apart from the log level, Authorino can output messages to the logs in 2 different formats:
- production (default): each line is a parseable JSON object with properties {"level":string, "ts":int, "msg":string, "logger":string, extra values...}
- development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\t<log-level>\t<logger>\t<message>\t{extra-values-as-json}
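Because each production-mode line is a self-contained JSON object, the log stream can be sliced with standard tooling. A minimal sketch, assuming a deployment named authorino and jq available locally:
# Print only error-level messages as "timestamp  logger  message"
kubectl logs deployment/authorino | jq -r 'select(.level == "error") | [.ts, .logger, .msg] | @tsv'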
To configure the desired log mode, set the spec.logMode field of the Authorino custom resource (or the --log-mode command-line flag in the Authorino deployment) to one of the supported values listed above. The default log mode is production.
Example of an Authorino custom resource with log level debug and log mode production:
apiVersion: operator.authorino.kuadrant.io/v1beta1
kind: Authorino
metadata:
  name: authorino
spec:
  logLevel: debug
  logMode: production
  listener:
    tls:
      enabled: false
  oidcServer:
    tls:
      enabled: false
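To change these settings on a running instance, the same fields can be patched on the existing resource and the operator is expected to reconcile the deployment accordingly – a sketch, assuming an Authorino custom resource named authorino in the current namespace:
kubectl patch authorino authorino --type=merge -p '{"spec":{"logLevel":"debug","logMode":"production"}}'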
"},{"location":"authorino/docs/user-guides/observability/#sensitive-data-output-to-the-logs","title":"Sensitive data output to the logs","text":"Authorino will never output HTTP headers and query string parameters to info
log messages, as such values usually include sensitive data (e.g. access tokens, API keys and Authorino Festival Wristbands). However, debug
log messages may include such sensitive information and those are not redacted.
Therefore, DO NOT USE debug
LOG LEVEL IN PRODUCTION! Instead, use either info
or error
.
"},{"location":"authorino/docs/user-guides/observability/#log-messages-printed-by-authorino","title":"Log messages printed by Authorino","text":"Some log messages printed by Authorino and corresponding extra values included:
logger level message extra values authorino
info
\"setting instance base logger\" min level=info\\|debug
, mode=production\\|development
authorino
info
\"booting up authorino\" version
authorino
debug
\"setting up with options\" auth-config-label-selector
, deep-metrics-enabled
, enable-leader-election
, evaluator-cache-size
, ext-auth-grpc-port
, ext-auth-http-port
, health-probe-addr
, log-level
, log-mode
, max-http-request-body-size
, metrics-addr
, oidc-http-port
, oidc-tls-cert
, oidc-tls-cert-key
, secret-label-selector
, timeout
, tls-cert
, tls-cert-key
, watch-namespace
authorino
info
\"attempting to acquire leader lease <namespace>/cb88a58a.authorino.kuadrant.io...\\n\" authorino
info
\"successfully acquired lease <namespace>/cb88a58a.authorino.kuadrant.io\\n\" authorino
info
\"disabling grpc auth service\" authorino
info
\"starting grpc auth service\" port
, tls
authorino
error
\"failed to obtain port for the grpc auth service\" authorino
error
\"failed to load tls cert for the grpc auth\" authorino
error
\"failed to start grpc auth service\" authorino
info
\"disabling http auth service\" authorino
info
\"starting http auth service\" port
, tls
authorino
error
\"failed to obtain port for the http auth service\" authorino
error
\"failed to start http auth service\" authorino
info
\"disabling http oidc service\" authorino
info
\"starting http oidc service\" port
, tls
authorino
error
\"failed to obtain port for the http oidc service\" authorino
error
\"failed to start http oidc service\" authorino
info
\"starting manager\" authorino
error
\"unable to start manager\" authorino
error
\"unable to create controller\" controller=authconfig\\|secret\\|authconfigstatusupdate
authorino
error
\"problem running manager\" authorino
info
\"starting status update manager\" authorino
error
\"unable to start status update manager\" authorino
error
\"problem running status update manager\" authorino.controller-runtime.metrics
info
\"metrics server is starting to listen\" addr
authorino.controller-runtime.manager
info
\"starting metrics server\" path
authorino.controller-runtime.manager.events
debug
\"Normal\" object={kind=ConfigMap, apiVersion=v1}
, reauthorino.ason=LeaderElection
, message=\"authorino-controller-manager-* became leader\"
authorino.controller-runtime.manager.events
debug
\"Normal\" object={kind=Lease, apiVersion=coordination.k8s.io/v1}
, reauthorino.ason=LeaderElection
, message=\"authorino-controller-manager-* became leader\"
authorino.controller-runtime.manager.controller.authconfig
info
\"resource reconciled\" authconfig
authorino.controller-runtime.manager.controller.authconfig
info
\"host already taken\" authconfig
, host
authorino.controller-runtime.manager.controller.authconfig.statusupdater
debug
\"resource status did not change\" authconfig
authorino.controller-runtime.manager.controller.authconfig.statusupdater
debug
\"resource status changed\" authconfig
, authconfig/status
authorino.controller-runtime.manager.controller.authconfig.statusupdater
error
\"failed to update the resource\" authconfig
authorino.controller-runtime.manager.controller.authconfig.statusupdater
info
\"resource status updated\" authconfig
authorino.controller-runtime.manager.controller.secret
info
\"resource reconciled\" authorino.controller-runtime.manager.controller.secret
info
\"could not reconcile authconfigs using api key authorino.authentication\" authorino.service.oidc
info
\"request received\" request id
, url
, realm
, config
, path
authorino.service.oidc
info
\"response sent\" request id
authorino.service.oidc
error
\"failed to serve oidc request\" authorino.service.auth
info
\"incoming authorization request\" request id
, object
authorino.service.auth
debug
\"incoming authorization request\" request id
, object
authorino.service.auth
info
\"outgoing authorization response\" request id
, authorized
, response
, object
authorino.service.auth
debug
\"outgoing authorization response\" request id
, authorized
, response
, object
authorino.service.auth
error
\"failed to create dynamic metadata\" request id
, object
authorino.service.auth.authpipeline
debug
\"skipping config\" request id
, config
, reason
authorino.service.auth.authpipeline.identity
debug
\"identity validated\" request id
, config
, object
authorino.service.auth.authpipeline.identity
debug
\"cannot validate identity\" request id
, config
, reason
authorino.service.auth.authpipeline.identity
error
\"failed to extend identity object\" request id
, config
, object
authorino.service.auth.authpipeline.identity.oidc
error
\"failed to discovery openid connect configuration\" endpoint
authorino.service.auth.authpipeline.identity.oidc
debug
\"auto-refresh of openid connect configuration disabled\" endpoint
, reason
authorino.service.auth.authpipeline.identity.oidc
debug
\"openid connect configuration updated\" endpoint
authorino.service.auth.authpipeline.identity.oauth2
debug
\"sending token introspection request\" request id
, url
, data
authorino.service.auth.authpipeline.identity.kubernetesauth
debug
\"calling kubernetes token review api\" request id
, tokenreview
authorino.service.auth.authpipeline.identity.apikey
error
\"Something went wrong fetching the authorized credentials\" authorino.service.auth.authpipeline.metadata
debug
\"fetched auth metadata\" request id
, config
, object
authorino.service.auth.authpipeline.metadata
debug
\"cannot fetch metadata\" request id
, config
, reason
authorino.service.auth.authpipeline.metadata.http
debug
\"sending request\" request id
, method
, url
, headers
authorino.service.auth.authpipeline.metadata.userinfo
debug
\"fetching user info\" request id
, endpoint
authorino.service.auth.authpipeline.metadata.uma
debug
\"requesting pat\" request id
, url
, data
, headers
authorino.service.auth.authpipeline.metadata.uma
debug
\"querying resources by uri\" request id
, url
authorino.service.auth.authpipeline.metadata.uma
debug
\"getting resource data\" request id
, url
authorino.service.auth.authpipeline.authorization
debug
\"evaluating for input\" request id
, input
authorino.service.auth.authpipeline.authorization
debug
\"access granted\" request id
, config
, object
authorino.service.auth.authpipeline.authorization
debug
\"access denied\" request id
, config
, reason
authorino.service.auth.authpipeline.authorization.opa
error
\"invalid response from policy evaluation\" policy
authorino.service.auth.authpipeline.authorization.opa
error
\"failed to precompile policy\" policy
authorino.service.auth.authpipeline.authorization.opa
error
\"failed to download policy from external registry\" policy
, endpoint
authorino.service.auth.authpipeline.authorization.opa
error
\"failed to refresh policy from external registry\" policy
, endpoint
authorino.service.auth.authpipeline.authorization.opa
debug
\"external policy unchanged\" policy
, endpoint
authorino.service.auth.authpipeline.authorization.opa
debug
\"auto-refresh of external policy disabled\" policy
, endpoint
, reason
authorino.service.auth.authpipeline.authorization.opa
info
\"policy updated from external registry\" policy
, endpoint
authorino.service.auth.authpipeline.authorization.kubernetesauthz
debug
\"calling kubernetes subject access review api\" request id
, subjectaccessreview
authorino.service.auth.authpipeline.response
debug
\"dynamic response built\" request id
, config
, object
authorino.service.auth.authpipeline.response
debug
\"cannot build dynamic response\" request id
, config
, reason
authorino.service.auth.http
debug
\"bad request\" request id
authorino.service.auth.http
debug
\"not found\" request id
authorino.service.auth.http
debug
\"request body too large\" request id
authorino.service.auth.http
debug
\"service unavailable\" request id
"},{"location":"authorino/docs/user-guides/observability/#examples","title":"Examples","text":"The examples below are all with --log-level=debug
and --log-mode=production
.
Booting up the service {\"level\":\"info\",\"ts\":1669220526.929678,\"logger\":\"authorino\",\"msg\":\"setting instance base logger\",\"min level\":\"debug\",\"mode\":\"production\"}\n{\"level\":\"info\",\"ts\":1669220526.929718,\"logger\":\"authorino\",\"msg\":\"booting up authorino\",\"version\":\"7688cfa32317a49f0461414e741c980e9c05dba3\"}\n{\"level\":\"debug\",\"ts\":1669220526.9297278,\"logger\":\"authorino\",\"msg\":\"setting up with options\",\"auth-config-label-selector\":\"\",\"deep-metrics-enabled\":\"false\",\"enable-leader-election\":\"false\",\"evaluator-cache-size\":\"1\",\"ext-auth-grpc-port\":\"50051\",\"ext-auth-http-port\":\"5001\",\"health-probe-addr\":\":8081\",\"log-level\":\"debug\",\"log-mode\":\"production\",\"max-http-request-body-size\":\"8192\",\"metrics-addr\":\":8080\",\"oidc-http-port\":\"8083\",\"oidc-tls-cert\":\"/etc/ssl/certs/oidc.crt\",\"oidc-tls-cert-key\":\"/etc/ssl/private/oidc.key\",\"secret-label-selector\":\"authorino.kuadrant.io/managed-by=authorino\",\"timeout\":\"0\",\"tls-cert\":\"/etc/ssl/certs/tls.crt\",\"tls-cert-key\":\"/etc/ssl/private/tls.key\",\"watch-namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669220527.9816976,\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\":8080\"}\n{\"level\":\"info\",\"ts\":1669220527.9823213,\"logger\":\"authorino\",\"msg\":\"starting grpc auth service\",\"port\":50051,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9823658,\"logger\":\"authorino\",\"msg\":\"starting http auth service\",\"port\":5001,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9824295,\"logger\":\"authorino\",\"msg\":\"starting http oidc service\",\"port\":8083,\"tls\":true}\n{\"level\":\"info\",\"ts\":1669220527.9825335,\"logger\":\"authorino\",\"msg\":\"starting manager\"}\n{\"level\":\"info\",\"ts\":1669220527.982721,\"logger\":\"authorino\",\"msg\":\"Starting server\",\"path\":\"/metrics\",\"kind\":\"metrics\",\"addr\":\"[::]:8080\"}\n{\"level\":\"info\",\"ts\":1669220527.982766,\"logger\":\"authorino\",\"msg\":\"Starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8081\"}\n{\"level\":\"info\",\"ts\":1669220527.9829438,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"source\":\"kind source: *v1.Secret\"}\n{\"level\":\"info\",\"ts\":1669220527.9829693,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting Controller\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669220527.9829714,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"source\":\"kind source: *v1beta1.AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220527.9830208,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting Controller\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220528.0834699,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting workers\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":1669220528.0836608,\"logger\":\"authorino.controller.secret\",\"msg\":\"Starting workers\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":1669220529.041266,\"logger\":\"authorino\",\"msg\":\"starting status update 
manager\"}\n{\"level\":\"info\",\"ts\":1669220529.0418258,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting EventSource\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"source\":\"kind source: *v1beta1.AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220529.0418813,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting Controller\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669220529.1432905,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Starting workers\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\",\"worker count\":1}\n
Reconciling an AuthConfig and 2 related API key secrets {\"level\":\"debug\",\"ts\":1669221208.7473805,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsNotLinked\",\"message\":\"No hosts linked to the resource\"},{\"type\":\"Ready\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Unknown\"}],\"summary\":{\"ready\":false,\"hostsReady\":[],\"numHostsReady\":\"0/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7496614,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7532616,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535005,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535596,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7536132,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221208.753772,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.753835,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsLinked\"},{\"type\":\"Ready\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Reconciled\"}],\"summary\":{\"ready\":true,\"hostsReady\":[\"talker-api.127.0.0.1.nip.io\"],\"numHostsReady\":\"1/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7571108,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7573664,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.757429,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not 
change\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586699,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586884,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7586913,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n{\"level\":\"debug\",\"ts\":1669221208.7597604,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n
Enforcing an AuthConfig with authentication based on Kubernetes tokens:
- identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830460.1486168,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1491194,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830460.150506,\"logger\":\"authorino.service.auth.authpipeline.identity.kubernetesauth\",\"msg\":\"calling kubernetes token review api\",\"request 
id\":\"8157480586935853928\",\"tokenreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"token\":\"eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"audiences\":[\"talker-api\"]},\"status\":{\"user\":{}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1509938,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830460.1517606,\"logger\":\"authorino.service.auth.authpipeline.identity.oauth2\",\"msg\":\"sending token introspection request\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"data\":\"token=eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA&token_type_hint=requesting_party_token\"}\n{\"level\":\"debug\",\"ts\":1634830460.1620777,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"}}\n{\"level\":\"debug\",\"ts\":1634830460.1622565,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.1670353,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"8157480586935853928\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.169326,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830460.1753876,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"}}\n{\"level\":\"debug\",\"ts\":1634830460.2331996,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830460.2495668,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830460.2927864,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830460.2930083,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"8157480586935853928\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830460.2955465,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"8157480586935853928\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"system:serviceaccount:authorino:api-consumer-1\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830460.2986183,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3044975,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3052874,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3NjAsImlhdCI6MTYzNDgzMDQ2MCwiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI4NDliMDk0ZDA4MzU0ZjM0MjA4ZGI3MjBmYWZmODlmNmM3NmYyOGY3MTcxOWI4NTQ3ZDk5NWNlNzAwMjU2ZGY4In0.Jn-VB5Q_0EX1ed1ji4KvhO4DlMqZeIl5H0qlukbTyYkp-Pgb4SnPGSbYWp5_uvG8xllsFAA5nuyBIXeba-dbkw\"}\n{\"level\":\"info\",\"ts\":1634830460.3054585,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830460.305476,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n
Enforcing an AuthConfig with authentication based on API keys:
- identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830413.2425854,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830413.2426975,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830413.2428744,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830413.2434332,\"logger\":\"authorino.service.auth.authpipeline\",\"msg\":\"skipping config\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"context canceled\"}\n{\"level\":\"debug\",\"ts\":1634830413.2479305,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"object\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"}}\n{\"level\":\"debug\",\"ts\":1634830413.248768,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"7199257136822741594\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.2496722,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830413.2497928,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"7199257136822741594\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.258932,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"}}\n{\"level\":\"debug\",\"ts\":1634830413.2945344,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830413.3123596,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830413.3340268,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830413.3367748,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"7199257136822741594\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY 
ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830413.339894,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3444238,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request 
id\":\"7199257136822741594\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"john\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830413.3547812,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3558292,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3MTMsImlhdCI6MTYzNDgzMDQxMywiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI5NjhiZjViZjk3MDM3NWRiNjE0ZDFhMDgzZTg2NTBhYTVhMGVhMzAyOTdiYmJjMTBlNWVlMWZmYTkxYTYwZmY4In0.7G440sWgi2TIaxrGJf5KWR9UOFpNTjwVYeaJXFLzsLhVNICoMLbYzBAEo4M3ym1jipxxTVeE7anm4qDDc7cnVQ\"}\n{\"level\":\"info\",\"ts\":1634830413.3569078,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830413.3569596,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n
Enforcing an AuthConfig with authentication based on API keys (invalid API key) - identity: k8s-auth, oidc, oauth2, apikey
- metadata: http, oidc userinfo
- authorization: opa, k8s-authz
- response: wristband
{\"level\":\"info\",\"ts\":1634830373.2066543,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830373.2068064,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830373,\"nanos\":198329000},\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY invalid\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"9e391846-afe4-489a-8716-23a2e1c1aa77\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830373.2070816,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-opaque\",\"ExtendedProperties\":[],\"OAuth2\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"TokenIntrospectionUrl\":\"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"TokenTypeHint\":\"requesting_party_token\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.207225,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"the API Key provided is invalid\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072473,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not 
found\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072592,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"info\",\"ts\":1634830373.2073083,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\"}}\n{\"level\":\"debug\",\"ts\":1634830373.2073889,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\",\"headers\":[{\"Location\":\"https://my-app.io/login\"}]}}\n
Deleting an AuthConfig and 2 related API key secrets {\"level\":\"info\",\"ts\":1669221361.5032296,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221361.5057878,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n
Shutting down the service {\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136683,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136883,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0137057,\"logger\":\"authorino.controller.secret\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.013724,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.01375,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.013752,\"logger\":\"authorino.controller.secret\",\"msg\":\"All workers finished\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.0137632,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.013751,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137684,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137722,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.0138857,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0138955,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n{\"level\":\"info\",\"ts\":1669221635.0138893,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0139785,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n
"},{"location":"authorino/docs/user-guides/observability/#tracing","title":"Tracing","text":""},{"location":"authorino/docs/user-guides/observability/#request-id","title":"Request ID","text":"Processes related to the authorization request are identified and linked together by a request ID. The request ID can be:
- generated outside Authorino and passed in the authorization request \u2013 this is typically the case of requests sent via the gRPC authorization interface, initiated by Envoy;
- generated by Authorino \u2013 the case of requests sent via the Raw HTTP Authorization interface.
"},{"location":"authorino/docs/user-guides/observability/#propagation","title":"Propagation","text":"Authorino propagates trace identifiers compatible with the W3C Trace Context format https://www.w3.org/TR/trace-context/ and user-defined baggage data in the W3C Baggage format https://www.w3.org/TR/baggage.
"},{"location":"authorino/docs/user-guides/observability/#log-tracing","title":"Log tracing","text":"Most log messages associated with an authorization request include the request id
value. This value can be used to match incoming request and corresponding outgoing response log messages, including at deep level when more fine-grained log details are enabled (debug
level level).
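To collect all log messages related to a single authorization request, filter by the request id. A rough sketch, assuming an Authorino instance deployed as deployment/authorino in the current namespace and the sample request id from the logs above:
kubectl logs deployment/authorino | grep '\"request id\":\"7199257136822741594\"'\n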
"},{"location":"authorino/docs/user-guides/observability/#opentelemetry-integration","title":"OpenTelemetry integration","text":"Integration with an OpenTelemetry collector can be enabled by supplying the --tracing-service-endpoint
command-line flag (e.g. authorino server --tracing-service-endpoint=http://jaeger:14268/api/traces
).
The additional --tracing-service-tag
command-line flag, which can be repeated, allows specifying fixed agent-level key-value tags for the trace signals emitted by Authorino (e.g. authorino server --tracing-service-endpoint=... --tracing-service-tag=key1=value1 --tracing-service-tag=key2=value2
).
Traces related to authorization requests are additionally tagged with the authorino.request_id
attribute.
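When Authorino is deployed with the Operator, these flags are usually not set by hand. A sketch of the equivalent Authorino custom resource, assuming the tracing options exposed by the Operator CRD map to the flags above:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n tracing:\n endpoint: http://jaeger:14268/api/traces\n tags:\n key1: value1\n key2: value2\nEOF\n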
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/","title":"User guide: OpenID Connect Discovery and authentication with JWTs","text":"Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
Authorino validates JSON Web Tokens (JWT) issued by an OpenID Connect server that implements OpenID Connect Discovery. Authorino fetches the OpenID Connect configuration and JSON Web Key Set (JWKS) from the issuer endpoint, and verifies the JSON Web Signature (JWS) and time validity of the token.
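To see what Authorino discovers, you can query the issuer's well-known OpenID Connect configuration endpoint yourself. A hedged example, using the in-cluster Keycloak server deployed later in this guide to print the JWKS endpoint:
kubectl run oidc-config --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/.well-known/openid-configuration -s | jq -r .jwks_uri\n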
Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a general good practice, obtaining and refreshing access tokens should be negotiated by the clients directly with the auth servers and token issuers. Authorino will only validate those tokens, using the parameters provided by the trusted issuer authorities.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referenced in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
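To double-check, decode the token payload and inspect the iss claim. A shell convenience only (the appended == padding is a quick base64url-decoding hack, not production-grade):
echo $ACCESS_TOKEN | cut -d. -f2 | sed 's/$/==/' | tr '_-' '/+' | base64 -d 2>/dev/null | jq .iss\n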
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#consume-the-api","title":"\u277c Consume the API","text":"With a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
With missing or invalid access token:
curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-kuadrant-realm\"\n# x-ext-auth-reason: credential not found\n
"},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oidc-rbac/","title":"User guide: OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak","text":"Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.
In this user guide, you will learn via example how to implement a simple Role-Based Access Control (RBAC) system to protect endpoints of an API, with roles assigned to users of an Identity Provider (Keycloak) and carried within the access tokens as JSON Web Token (JWT) claims. Users authenticate with the IdP via OAuth2/OIDC flow and get their access tokens verified and validated by Authorino on every request. Moreover, Authorino reads the role bindings of the user and enforces the proper RBAC rules based upon the context.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-rbac/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referenced in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oidc-rbac/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, the Keycloak realm defines a few users and 2 realm roles: 'member' and 'admin'. When users authenticate to the Keycloak server by any of the supported OAuth2/OIDC flows, Keycloak adds to the access token JWT a claim \"realm_access\": { \"roles\": array }
that holds the list of roles assigned to the user. Authorino will verify the JWT on requests to the API and read from that claim to enforce the following RBAC rules:
Path             Method             Role
/resources[/*]   GET / POST / PUT   member
/resources/{id}  DELETE             admin
/admin[/*]       *                  admin
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Apply the AuthConfig:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n\n patterns:\n \"member-role\":\n\n - selector: auth.identity.realm_access.roles\n operator: incl\n value: member\n \"admin-role\":\n - selector: auth.identity.realm_access.roles\n operator: incl\n value: admin\n\n authorization:\n # RBAC rule: 'member' role required for requests to /resources[/*]\n \"rbac-resources-api\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources(/.*)?$\n patternMatching:\n patterns:\n - patternRef: member-role\n\n # RBAC rule: 'admin' role required for DELETE requests to /resources/{id}\n \"rbac-delete-resource\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/resources/\\d+$\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n patternMatching:\n patterns:\n - patternRef: admin-role\n\n # RBAC rule: 'admin' role required for requests to /admin[/*]\n \"rbac-admin-api\":\n when:\n\n - selector: context.request.http.path\n operator: matches\n value: ^/admin(/.*)?$\n patternMatching:\n patterns:\n - patternRef: admin-role\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user John, who is assigned to the 'member' role:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
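Optionally, decode the token payload to confirm the roles Authorino will evaluate (the appended == padding is a quick base64url-decoding hack, not production-grade):
echo $ACCESS_TOKEN | cut -d. -f2 | sed 's/$/==/' | tr '_-' '/+' | base64 -d 2>/dev/null | jq .realm_access.roles\n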
As John, send a GET
request to /resources:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n
As John, send a DELETE
request to /resources/123:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 403 Forbidden\n
As John, send a GET
request to /admin/settings:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-jane-memberadmin","title":"Obtain an access token and consume the API as Jane (member/admin)","text":"Obtain an access token from within the cluster for the user Jane, who is assigned to the 'member' and 'admin' roles:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, send a GET
request to /resources:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n
As Jane, send a DELETE
request to /resources/123:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 200 OK\n
As Jane, send a GET
request to /admin/settings:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/oidc-rbac/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/oidc-user-info/","title":"User guide: OpenID Connect UserInfo","text":"Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.
Authorino capabilities featured in this guide: - External auth metadata \u2192 OIDC UserInfo
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Apart from possibly complementing the information in the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remotely checking the state of the session, as opposed to only verifying the JWT/JWS offline. Implementation requires an OpenID Connect issuer (spec.identity.oidc
) configured in the same AuthConfig
.
Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/oidc-user-info/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referenced in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/oidc-user-info/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"userinfo\":\n userInfo:\n identitySource: keycloak-kuadrant-realm\n authorization:\n \"active-tokens-only\":\n patternMatching:\n patterns:\n - selector: \"auth.metadata.userinfo.email\" # user email expected from the userinfo instead of the jwt\n operator: neq\n value: \"\"\nEOF\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
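Optionally, preview the UserInfo response that Authorino will fetch for this token. A sketch using Keycloak's standard UserInfo endpoint, called from within the cluster:
kubectl run userinfo --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/userinfo -s -H \"Authorization: Bearer $ACCESS_TOKEN\" | jq .\n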
"},{"location":"authorino/docs/user-guides/oidc-user-info/#consume-the-api","title":"\u277c Consume the API","text":"With a valid access token:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Revoke the access token and try to consume the API again:
kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/oidc-user-info/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/opa-authorization/","title":"User guide: Open Policy Agent (OPA) Rego policies","text":"Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.
Authorino capabilities featured in this guide: - Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Identity verification & authentication \u2192 API key
Authorino supports Open Policy Agent policies, either defined inline in Rego language as part of the AuthConfig
or fetched from an external endpoint, such as an OPA Policy Registry.
Authorino's built-in OPA module precompiles the policies in reconciliation-time and caches them for fast evaluation in request-time, where they receive the Authorization JSON as input.
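To iterate on a policy before adding it to an AuthConfig, you can evaluate the same Rego locally with the OPA CLI. A minimal sketch, assuming the policy is saved to policy.rego with a package declaration added (Authorino wraps inline Rego in a package for you), a sample Authorization JSON in input.json, and an OPA version that still accepts this pre-1.0 Rego syntax:
opa eval --data policy.rego --input input.json 'data.example.allow'\n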
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/opa-authorization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referenced in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/opa-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, we will use OPA to implement a read-only policy for requests coming from outside a trusted network (IP range 192.168.1.0/24).
The implementation relies on the X-Forwarded-For
HTTP header to read the client's IP address.5
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n authorization:\n \"read-only-outside\":\n opa:\n rego: |\n ips := split(input.context.request.http.headers[\"x-forwarded-for\"], \",\")\n trusted_network { net.cidr_contains(\"192.168.1.1/24\", ips[0]) }\n\n allow { trusted_network }\n allow { not trusted_network; input.context.request.http.method == \"GET\" }\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#create-the-api-key","title":"\u277b Create the API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#consume-the-api","title":"\u277c Consume the API","text":"Inside the trusted network:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 192.168.1.10' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Outside the trusted network:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n -H 'X-Forwarded-For: 123.45.6.78' \\\n -X POST \\\n http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
"},{"location":"authorino/docs/user-guides/opa-authorization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
-
You can also set use_remote_address: true
in the Envoy route configuration, so the proxy appends its own IP address instead of running in transparent mode. This setting also ensures the real remote address of the client connection is passed in the x-envoy-external-address
HTTP header, which can be used to simplify the read-only policy in remote environments.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/passing-credentials/","title":"User guide: Passing credentials (Authorization
header, cookie headers and others)","text":"Customize where credentials are supplied in the request by each trusted source of identity.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Auth credentials
- Identity verification & authentication \u2192 API key
Authentication tokens can be supplied in the Authorization
header, in a custom header, cookie or query string parameter.
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/passing-credentials/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referenced in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/passing-credentials/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
In this example, member
users can authenticate by supplying the API key in any of 4 different ways:
- HTTP header
Authorization: APIKEY <api-key>
- HTTP header
X-API-Key: <api-key>
- Query string parameter
api_key=<api-key>
- Cookie
Cookie: APIKEY=<api-key>;
admin
API keys are only accepted in the (default) HTTP header Authorization: Bearer <api-key>
.
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"members-authorization-header\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n authorizationHeader:\n prefix: APIKEY # instead of the default prefix 'Bearer'\n \"members-custom-header\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n customHeader:\n name: X-API-Key\n \"members-query-string-param\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n queryString:\n name: api_key\n \"members-cookie\":\n apiKey:\n selector:\n matchLabels:\n group: members\n credentials:\n cookie:\n name: APIKEY\n \"admins\":\n apiKey:\n selector:\n matchLabels:\n group: admins\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#create-the-api-keys","title":"\u277b Create the API keys","text":"For a member user:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: members\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
For an admin user:
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: admins\nstringData:\n api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/passing-credentials/#consume-the-api","title":"\u277c Consume the API","text":"As member user, passing the API key in the Authorization
header:
curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the custom X-API-Key
header:
curl -H 'X-API-Key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the query string parameter api_key
:
curl \"http://talker-api.127.0.0.1.nip.io:8000/hello?api_key=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\"\n# HTTP/1.1 200 OK\n
As member user, passing the API key in the APIKEY
cookie header:
curl -H 'Cookie: APIKEY=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx;foo=bar' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
As admin user:
curl -H 'Authorization: Bearer 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
Missing the API key:
curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"members-authorization-header\"\n# www-authenticate: X-API-Key realm=\"members-custom-header\"\n# www-authenticate: api_key realm=\"members-query-string-param\"\n# www-authenticate: APIKEY realm=\"members-cookie\"\n# www-authenticate: Bearer realm=\"admins\"\n# x-ext-auth-reason: {\"admins\":\"credential not found\",\"members-authorization-header\":\"credential not found\",\"members-cookie\":\"credential not found\",\"members-custom-header\":\"credential not found\",\"members-query-string-param\":\"credential not found\"}\n
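An API key that does not match any Secret in the cluster is rejected in the same fashion. A quick sanity check (the key below is a made-up value that was never stored in a Secret):
curl -H 'Authorization: APIKEY this-key-does-not-exist' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n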
"},{"location":"authorino/docs/user-guides/passing-credentials/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/","title":"User guide: Resource-level authorization with User-Managed Access (UMA) resource registry","text":"Fetch resource metadata relevant for your authorization policies from Keycloak authorization clients, using User-Managed Access (UMA) protocol.
Authorino capabilities featured in this guide: - External auth metadata \u2192 User-Managed Access (UMA) resource registry
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Open Policy Agent (OPA) Rego policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced:
This example of resource-level authorization leverages part of Keycloak's User-Managed Access (UMA) support. Authorino will fetch resource attributes stored in a Keycloak resource server client.
The Keycloak server also provides the identities. The sub
claim of the Keycloak-issued ID tokens must match the owner of the requested resource, identified by the URI of the request.
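For reference, a resource record fetched from the UMA registry can be pictured roughly as below. This is a sketch limited to the fields the authorization policy in this guide relies on; actual Keycloak payloads carry additional attributes:
[\n  {\n    \"_id\": \"<resource id>\",\n    \"name\": \"/greetings/1\",\n    \"owner\": { \"id\": \"<'sub' claim of the owning user>\" }\n  }\n]\n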
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. Create the required Secret that will be used by Authorino to authenticate with the UMA registry.
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: talker-api-uma-credentials\nstringData:\n clientID: talker-api\n clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\nEOF\n
Create the config:
kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"resource-data\":\n uma:\n endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n credentialsRef:\n name: talker-api-uma-credentials\n authorization:\n \"owned-resources\":\n opa:\n rego: |\n COLLECTIONS = [\"greetings\"]\n\n http_request = input.context.request.http\n http_method = http_request.method\n requested_path_sections = split(trim_left(trim_right(http_request.path, \"/\"), \"/\"), \"/\")\n\n get { http_method == \"GET\" }\n post { http_method == \"POST\" }\n put { http_method == \"PUT\" }\n delete { http_method == \"DELETE\" }\n\n valid_collection { COLLECTIONS[_] == requested_path_sections[0] }\n\n collection_endpoint {\n valid_collection\n count(requested_path_sections) == 1\n }\n\n resource_endpoint {\n valid_collection\n some resource_id\n requested_path_sections[1] = resource_id\n }\n\n identity_owns_the_resource {\n identity := input.auth.identity\n resource_attrs := object.get(input.auth.metadata, \"resource-data\", [])[0]\n resource_owner := object.get(object.get(resource_attrs, \"owner\", {}), \"id\", \"\")\n resource_owner == identity.sub\n }\n\n allow { get; collection_endpoint }\n allow { post; collection_endpoint }\n allow { get; resource_endpoint; identity_owns_the_resource }\n allow { put; resource_endpoint; identity_owns_the_resource }\n allow { delete; resource_endpoint; identity_owns_the_resource }\nEOF\n
The OPA policy owned-resources
above enforces that all users can send GET and POST requests to /greetings
, while only resource owners can send GET, PUT and DELETE requests to /greetings/{resource-id}
.
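For instance, once a valid access token is obtained (next step), creating a new greeting should succeed for any authenticated user, regardless of ownership. A hypothetical check:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n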
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-access-tokens-with-the-keycloak-server-and-consume-the-api","title":"\u277b Obtain access tokens with the Keycloak server and consume the API","text":""},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-john-and-consume-the-api","title":"Obtain an access token as John and consume the API","text":"Obtain an access token for user John (owner of the resource /greetings/1
in the UMA registry):
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that provides the OpenID Connect configuration.
Obtain an access token from within the cluster:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
As John, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-jane-and-consume-the-api","title":"Obtain an access token as Jane and consume the API","text":"Obtain an access token for user Jane (owner of the resource /greetings/2
in the UMA registry):
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Jane, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-peter-and-consume-the-api","title":"Obtain an access token as Peter and consume the API","text":"Obtain an access token for user Peter (does not own any resource in the UMA registry):
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
As Peter, send requests to the API:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authconfig/talker-api-protection\nkubectl delete secret/talker-api-uma-credentials\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/sharding/","title":"User guide: Reducing the operational space","text":"By default, Authorino will watch events related to all AuthConfig
custom resources in the reconciliation space (namespace or entire cluster). Instances can, however, be configured to watch only a subset of the resources, enabling use cases such as:
- reducing noise and lowering memory usage inside instances meant for a restricted scope (e.g. Authorino deployed as a dedicated sidecar to protect only one host);
- sharding auth config data across multiple instances;
- running multiple environments (e.g. staging, production) inside the same cluster/namespace;
- providing managed instances of Authorino that all watch CRs cluster-wide, yet dedicated to organizations allowed to create and operate their own AuthConfigs across multiple namespaces.
\u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. Authorino capabilities featured in this guide: - Sharding
- Identity verification & authentication \u2192 API key
Check out as well the user guide about Authentication with API keys.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/sharding/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
"},{"location":"authorino/docs/user-guides/sharding/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/sharding/#deploy-instances-of-authorino","title":"\u2777 Deploy instances of Authorino","text":"Deploy an instance of Authorino dedicated to AuthConfig
s and API key Secrets
labeled with authorino/environment=staging
:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino-staging\nspec:\n clusterWide: true\n authConfigLabelSelectors: authorino/environment=staging\n secretLabelSelectors: authorino/environment=staging\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
Deploy an instance of Authorino dedicated to AuthConfig
s and API key Secrets
labeled with authorino/environment=production
, and NOT labeled disabled
:
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino-production\nspec:\n clusterWide: true\n authConfigLabelSelectors: authorino/environment=production,!disabled\n secretLabelSelectors: authorino/environment=production,!disabled\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
The commands above will both request instances of Authorino that watch for AuthConfig
resources cluster-wide1, with TLS disabled2.
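Both instances should now be listed. A quick check, assuming authorinos as the plural name of the Authorino custom resource and the default namespace:
kubectl get authorinos -o name\n# authorino.operator.authorino.kuadrant.io/authorino-production\n# authorino.operator.authorino.kuadrant.io/authorino-staging\n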
"},{"location":"authorino/docs/user-guides/sharding/#create-a-namespace-for-user-resources","title":"\u2778 Create a namespace for user resources","text":"kubectl create namespace myapp\n
"},{"location":"authorino/docs/user-guides/sharding/#create-authconfigs-and-api-key-secrets-for-both-instances","title":"\u2779 Create AuthConfig
s and API key Secret
s for both instances","text":""},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-staging","title":"Create resources for authorino-staging
","text":"Create an AuthConfig
:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: auth-config-1\n labels:\n authorino/environment: staging\nspec:\n hosts:\n\n - my-host.staging.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels:\n authorino/api-key: \"true\"\n authorino/environment: staging\nEOF\n
Create an API key Secret
:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino/api-key: \"true\"\n authorino/environment: staging\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
Verify in the logs that only the authorino-staging
instance adds the resources to the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-staging -o name)\n# {\"level\":\"info\",\"ts\":1638382989.8327162,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638382989.837424,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638383144.9486837,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-1\"}\n
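Conversely, the authorino-production instance should log no reconciliation events for these resources. A hypothetical sanity check:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name) | grep auth-config-1\n# (no output expected)\n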
"},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-production","title":"Create resources for authorino-production
","text":"Create an AuthConfig
:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: auth-config-2\n labels:\n authorino/environment: production\nspec:\n hosts:\n\n - my-host.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels:\n authorino/api-key: \"true\"\n authorino/environment: production\nEOF\n
Create an API key Secret
:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-2\n labels:\n authorino/api-key: \"true\"\n authorino/environment: production\nstringData:\n api_key: MUWdeBte7AbSWxl6CcvYNJ+3yEIm5CaL\ntype: Opaque\nEOF\n
Verify in the logs that only the authorino-production
instance adds the resources to the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383423.86086,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383423.8608105,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383460.3515081,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-2\"}\n
"},{"location":"authorino/docs/user-guides/sharding/#remove-a-resource-from-scope","title":"\u277a Remove a resource from scope","text":"kubectl -n myapp label authconfig/auth-config-2 disabled=true\n# authconfig.authorino.kuadrant.io/auth-config-2 labeled\n
Verify in the logs that the authorino-production
instance removes the authconfig from the index:
kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383515.6428752,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource de-indexed\",\"authconfig\":\"myapp/auth-config-2\"}\n
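To bring the resource back into scope, remove the label (standard kubectl syntax for unsetting a label):
kubectl -n myapp label authconfig/auth-config-2 disabled-\n# authconfig.authorino.kuadrant.io/auth-config-2 unlabeled\n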
"},{"location":"authorino/docs/user-guides/sharding/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete authorino/authorino-staging\nkubectl delete authorino/authorino-production\nkubectl delete namespace myapp\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
cluster-wide
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/token-normalization/","title":"User guide: Token normalization","text":"Broadly, the term token normalization in authentication systems usually implies the exchange of an authentication token, as provided by the user in a given format, and/or its associated identity claims, for another freshly issued token/set of claims, of a given (normalized) structure or format.
The most typical use case for token normalization involves accepting tokens issued by multiple trusted sources, often across varied authentication protocols, while ensuring that the potentially different data structures adopted by each of those sources are normalized, thus simplifying the policies and authorization checks that depend on those values. In general, however, any modification to the identity claims can serve the purpose of normalization.
This user guide focuses on mutating the identity claims resolved from an authentication token, whether by converting them to a certain data format or by extending them, so that required attributes can thereafter be trusted to be present among the claims, in the desired form. To that end, Authorino allows extending resolved identity objects with custom attributes (custom claims) holding either static values or values fetched from the Authorization JSON.
To not only normalize the identity claims for the purpose of writing simpler authorization checks and policies, but also have Authorino issue a new token in the normalized format, check out the Festival Wristband tokens feature.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Identity extension
- Identity verification & authentication \u2192 API key
- Identity verification & authentication \u2192 JWT verification
- Authorization \u2192 Pattern-matching authorization
Check out as well the user guides about Authentication with API keys, OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/token-normalization/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
- jq, to extract parts of JSON responses
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.
At step \u277a, instead of creating an AuthConfig
custom resource, create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/token-normalization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/token-normalization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources in the default
namespace2, with TLS disabled3.
kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
"},{"location":"authorino/docs/user-guides/token-normalization/#setup-envoy","title":"\u2779 Setup Envoy","text":"The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4
kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n
The command above creates an Ingress
with host name talker-api.127.0.0.1.nip.io
. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:
kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
"},{"location":"authorino/docs/user-guides/token-normalization/#create-an-authconfig","title":"\u277a Create an AuthConfig
","text":"Create an Authorino AuthConfig
custom resource declaring the auth rules to be enforced.
This example implements a policy that only users bound to the admin
role can send DELETE
requests.
The config trusts access tokens issued by a Keycloak realm as well as API keys labeled specifically to a selected group (friends
). The roles of the identities handled by Keycloak are managed in Keycloak, as realm roles. Particularly, users john
and peter
are bound to the member
role, while user jane
is bound to roles member
and admin
. As for the users authenticating with API key, they are all bound to the admin
role.
Without normalizing the identity claims from these two different sources, the policy would have to handle the differences in data formats with additional ifs-and-elses. Instead, the config here uses the overrides and defaults options of the authentication methods to ensure a custom roles (Array) claim is always present in the identity object. In the case of Keycloak ID tokens, the value is extracted from the realm_access.roles claim; for API key-resolved objects, the custom claim is set to the static value [\"admin\"].
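To illustrate, after normalization both kinds of resolved identity objects expose the custom claim in the same shape. A sketch, with unrelated claims omitted:
# identity resolved from a Keycloak token (roles copied from 'realm_access.roles'):\n{ \"sub\": \"...\", \"realm_access\": { \"roles\": [\"member\", \"admin\"] }, \"roles\": [\"member\", \"admin\"] }\n\n# identity resolved from an API key (static value):\n{ \"roles\": [\"admin\"] }\n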
Kuadrant users \u2013 Remember to create an AuthPolicy
instead of an AuthConfig. For more, see Kuadrant auth. kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: talker-api-protection\nspec:\n hosts:\n\n - talker-api.127.0.0.1.nip.io\n authentication:\n \"keycloak-kuadrant-realm\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n overrides:\n \"roles\":\n selector: auth.identity.realm_access.roles\n \"api-key-friends\":\n apiKey:\n selector:\n matchLabels:\n group: friends\n credentials:\n authorizationHeader:\n prefix: APIKEY\n defaults:\n \"roles\":\n value: [\"admin\"]\n authorization:\n \"only-admins-can-delete\":\n when:\n - selector: context.request.http.method\n operator: eq\n value: DELETE\n patternMatching:\n patterns:\n - selector: auth.identity.roles\n operator: incl\n value: admin\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#create-an-api-key","title":"\u277b Create an API key","text":"kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n group: friends\nstringData:\n api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
"},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"Obtain an access token with the Keycloak server for Jane:
The AuthConfig
deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss
claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the one that provides the OpenID Connect configuration.
Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig
matches the one used to obtain the token and is also reachable from within the cluster.
Consume the API as Jane:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"Obtain an access token with the Keycloak server for John:
ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n
Consume the API as John:
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api-using-the-api-key-to-authenticate-admin","title":"Consume the API using the API key to authenticate (admin)","text":"curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
"},{"location":"authorino/docs/user-guides/token-normalization/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
-
In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9
-
namespaced
reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9
-
For other variants and deployment options, check out Getting Started, as well as the Authorino
CRD specification.\u00a0\u21a9
-
For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9
"},{"location":"authorino/docs/user-guides/validating-webhook/","title":"User guide: Using Authorino as ValidatingWebhook service","text":"Authorino provides an interface for raw HTTP external authorization requests. This interface can be used for integrations other than the typical Envoy gRPC protocol, such as (though not limited to) using Authorino as a generic Kubernetes ValidatingWebhook service.
The rules to validate a request to the Kubernetes API \u2013 typically a POST, PUT or DELETE request targeting a particular Kubernetes resource or collection \u2013 based on which the change is accepted or rejected, are written in an Authorino AuthConfig custom resource. Authentication and authorization are performed by the Kubernetes API server as usual, with the auth features of Authorino implementing the additional validation within the scope of an AdmissionReview request.
This user guide provides an example of using Authorino as a Kubernetes ValidatingWebhook service that validates requests to CREATE
and UPDATE
Authorino AuthConfig
resources. In other words, we will use Authorino as a validator inside the cluster that decides what is a valid AuthConfig for any application that wants to rely on Authorino to protect itself.
Authorino capabilities featured in this guide: - Identity verification & authentication \u2192 Plain
- Identity verification & authentication \u2192 Kubernetes TokenReview
- Identity verification & authentication \u2192 API key
- External auth metadata \u2192 HTTP GET/GET-by-POST
- Authorization \u2192 Kubernetes SubjectAccessReview
- Authorization \u2192 Open Policy Agent (OPA) Rego policies
- Dynamic response \u2192 Festival Wristband tokens
- Common feature \u2192 Conditions
- Common feature \u2192 Priorities
For further details about Authorino features in general, check the docs.
"},{"location":"authorino/docs/user-guides/validating-webhook/#requirements","title":"Requirements","text":" - Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
- Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.
kind create cluster --name authorino-tutorial\n
Deploy the identity provider and authentication server. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.
The Keycloak server is only needed for trying out validating AuthConfig resources that use the authentication server.
kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n
The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.
Using Kuadrant If you are a user of Kuadrant you may already have Authorino installed and running. In this case, skip straight to step \u2778.
At step \u277a, alternatively to creating an AuthConfig
custom resource, you may create a Kuadrant AuthPolicy
one. The schema of the AuthConfig's spec
matches the one of the AuthPolicy's, except spec.host
, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef
and route selectors declared in the policy.
For more about using Kuadrant to enforce authorization, check out Kuadrant auth.
"},{"location":"authorino/docs/user-guides/validating-webhook/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.
curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"Create the namespace:
kubectl create namespace authorino\n
Create the TLS certificates:
curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n
Create the Authorino instance:
The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig
resources cluster-wide2, with TLS enabled3.
kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n listener:\n ports:\n grpc: 50051\n http: 5001 # for admissionreview requests sent by the kubernetes api server\n tls:\n certSecretRef:\n name: authorino-server-cert\n oidcServer:\n tls:\n certSecretRef:\n name: authorino-oidc-server-cert\nEOF\n
For convenience, the same Authorino instance registered as the validating webhook will also be the target of the sample AuthConfigs created to test the validation. To use different instances of Authorino for the validating webhook and for protecting applications behind a proxy, check out the section about sharding in the docs. There is also a user guide on the topic, with concrete examples.
"},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-authconfig-and-related-clusterrole","title":"\u2778 Create the AuthConfig
and related ClusterRole
","text":"Create the AuthConfig
with the auth rules to validate other AuthConfig resources applied to the cluster.
The AuthConfig to validate other AuthConfigs will enforce the following rules:
- Authorino features that cannot be used by any application in their security schemes:
- Anonymous Access
- Plain identity object extracted from context
- Kubernetes authentication (TokenReview)
- Kubernetes authorization (SubjectAccessReview)
- Festival Wristband tokens
- Authorino features that require a RoleBinding to a specific ClusterRole in the 'authorino' namespace, to be used in an AuthConfig:
- Authorino API key authentication
- All metadata pulled from external sources must be cached for precisely 5 minutes (300 seconds)
kubectl -n authorino apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: authconfig-validator\nspec:\n # admissionreview requests will be sent to this host name\n hosts:\n\n - authorino-authorino-authorization.authorino.svc\n\n # because we're using a single authorino instance for the validating webhook and to protect the user applications,\n # skip operations related to this one authconfig in the 'authorino' namespace\n when:\n\n - selector: context.request.http.body.@fromstr|request.object.metadata.namespace\n operator: neq\n value: authorino\n\n # kubernetes admissionreviews carry info about the authenticated user\n authentication:\n \"k8s-userinfo\":\n plain:\n selector: context.request.http.body.@fromstr|request.userInfo\n\n authorization:\n \"features\":\n opa:\n rego: |\n authconfig = json.unmarshal(input.context.request.http.body).request.object\n\n forbidden { count(object.get(authconfig.spec, \"authentication\", [])) == 0 }\n forbidden { authconfig.spec.authentication[_].anonymous }\n forbidden { authconfig.spec.authentication[_].kubernetesTokenReview }\n forbidden { authconfig.spec.authentication[_].plain }\n forbidden { authconfig.spec.authorization[_].kubernetesSubjectAccessReview }\n forbidden { authconfig.spec.response.success.headers[_].wristband }\n\n apiKey { authconfig.spec.authentication[_].apiKey }\n\n allow { count(authconfig.spec.authentication) > 0; not forbidden }\n allValues: true\n\n \"apikey-authn-requires-k8s-role-binding\":\n priority: 1\n when:\n\n - selector: auth.authorization.features.apiKey\n operator: eq\n value: \"true\"\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.username\n resourceAttributes:\n namespace: { value: authorino }\n group: { value: authorino.kuadrant.io }\n resource: { value: authconfigs-with-apikeys }\n verb: { value: create }\n\n \"metadata-cache-ttl\":\n priority: 1\n opa:\n rego: |\n invalid_ttl = input.auth.authorization.features.authconfig.spec.metadata[_].cache.ttl != 300\n allow { not invalid_ttl }\nEOF\n
Define a ClusterRole
to control the usage of protected features of Authorino:
kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: authorino-apikey\nrules:\n\n- apiGroups: [\"authorino.kuadrant.io\"]\n resources: [\"authconfigs-with-apikeys\"] # not a real k8s resource\n verbs: [\"create\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-validatingwebhookconfiguration","title":"\u2779 Create the ValidatingWebhookConfiguration
","text":"kubectl -n authorino apply -f -<<EOF\napiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: authconfig-authz\n annotations:\n cert-manager.io/inject-ca-from: authorino/authorino-ca-cert\nwebhooks:\n\n- name: check-authconfig.authorino.kuadrant.io\n clientConfig:\n service:\n namespace: authorino\n name: authorino-authorino-authorization\n port: 5001\n path: /check\n rules:\n - apiGroups: [\"authorino.kuadrant.io\"]\n apiVersions: [\"v1beta2\"]\n resources: [\"authconfigs\"]\n operations: [\"CREATE\", \"UPDATE\"]\n scope: Namespaced\n sideEffects: None\n admissionReviewVersions: [\"v1\"]\nEOF\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#try-it-out","title":"\u277a Try it out","text":"Create a namespace:
kubectl create namespace myapp\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-a-valid-authconfig","title":"With a valid AuthConfig
","text":"Kuadrant users \u2013 For this and other example AuthConfigs below, if you create a Kuadrant AuthPolicy
instead, the output of the commands shall differ. The requested AuthPolicy may be initially accepted, but its state will turn ready or not ready depending on whether the corresponding AuthConfig requested by Kuadrant is accepted or rejected, according to the validating webhook rules. Check the state of the resources to confirm. For more, see Kuadrant auth. kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection created\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-forbidden-features","title":"With forbidden features","text":"Anonymous access:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":null}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"anonymous-access\":\n anonymous: {}\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"anonymous-access\\\":{\\\"anonymous\\\":{}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"anonymous-access\":{\"anonymous\":{}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Kubernetes TokenReview:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"k8s-tokenreview\":\n kubernetesTokenReview:\n audiences: [\"myapp\"]\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"k8s-tokenreview\\\":{\\\"kubernetesTokenReview\\\":{\\\"audiences\\\":[\\\"myapp\\\"]}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"k8s-tokenreview\":{\"kubernetesTokenReview\":{\"audiences\":[\"myapp\"]}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Plain identity extracted from context:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"envoy-jwt-authn\":\n plain:\n selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"envoy-jwt-authn\\\":{\\\"plain\\\":{\\\"selector\\\":\\\"context.metadata_context.filter_metadata.envoy\\\\\\\\.filters\\\\\\\\.http\\\\\\\\.jwt_authn|verified_jwt\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"envoy-jwt-authn\":{\"plain\":{\"selector\":\"context.metadata_context.filter_metadata.envoy\\\\.filters\\\\.http\\\\.jwt_authn|verified_jwt\"}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Kubernetes SubjectAccessReview:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n authorization:\n \"k8s-subjectaccessreview\":\n kubernetesSubjectAccessReview:\n user:\n selector: auth.identity.sub\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"authorization\\\":{\\\"k8s-subjectaccessreview\\\":{\\\"kubernetesSubjectAccessReview\\\":{\\\"user\\\":{\\\"selector\\\":\\\"auth.identity.sub\\\"}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authorization\":{\"k8s-subjectaccessreview\":{\"kubernetesSubjectAccessReview\":{\"user\":{\"selector\":\"auth.identity.sub\"}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Festival Wristband tokens:
kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: wristband-signing-key\nstringData:\n key.pem: |\n -----BEGIN EC PRIVATE KEY-----\n MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n -----END EC PRIVATE KEY-----\ntype: Opaque\n---\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n response:\n success:\n headers:\n \"wristband\":\n wristband:\n issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\n signingKeyRefs:\n - algorithm: ES256\n name: wristband-signing-key\nEOF\n# secret/wristband-signing-key created\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"response\\\":{\\\"success\\\":{\\\"headers\\\":{\\\"wristband\\\":{\\\"wristband\\\":{\\\"issuer\\\":\\\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\\\",\\\"signingKeyRefs\\\":[{\\\"algorithm\\\":\\\"ES256\\\",\\\"name\\\":\\\"wristband-signing-key\\\"}]}}}}}}}\\n\"}},\"spec\":{\"response\":{\"success\":{\"headers\":{\"wristband\":{\"wristband\":{\"issuer\":\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\",\"signingKeyRefs\":[{\"algorithm\":\"ES256\",\"name\":\"wristband-signing-key\"}]}}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-additional-permissions","title":"With features that require additional permissions","text":"Before adding the required permissions:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels: { app: myapp }\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"api-key\\\":{\\\"apiKey\\\":{\\\"selector\\\":{\\\"matchLabels\\\":{\\\"app\\\":\\\"myapp\\\"}}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":{\"apiKey\":{\"selector\":{\"matchLabels\":{\"app\":\"myapp\"}}}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Not authorized: unknown reason\n
Add the required permissions:
kubectl -n authorino apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: authorino-apikey\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: authorino-apikey\nsubjects:\n\n- kind: User\n name: kubernetes-admin\nEOF\n# rolebinding.rbac.authorization.k8s.io/authorino-apikey created\n
After adding the required permissions:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"api-key\":\n apiKey:\n selector:\n matchLabels: { app: myapp }\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-specific-property-validation","title":"With features that require specific property validation","text":"Invalid:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"external-source\":\n http:\n url: http://metadata.io\n cache:\n key: { value: global }\n ttl: 60\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta2\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"metadata\\\":{\\\"external-source\\\":{\\\"cache\\\":{\\\"key\\\":{\\\"value\\\":\\\"global\\\"},\\\"ttl\\\":60},\\\"http\\\":{\\\"url\\\":\\\"http://metadata.io\\\"}}}}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":null,\"keycloak\":{\"jwt\":{\"issuerUrl\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\"}}},\"metadata\":{\"external-source\":{\"cache\":{\"key\":{\"value\":\"global\"},\"ttl\":60},\"http\":{\"url\":\"http://metadata.io\"}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta2, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta2, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
Valid:
kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta2\nkind: AuthConfig\nmetadata:\n name: myapp-protection\nspec:\n hosts:\n\n - myapp.io\n authentication:\n \"keycloak\":\n jwt:\n issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n metadata:\n \"external-source\":\n http:\n url: http://metadata.io\n cache:\n key: { value: global }\n ttl: 300\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
"},{"location":"authorino/docs/user-guides/validating-webhook/#cleanup","title":"Cleanup","text":"If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:
kind delete cluster --name authorino-tutorial\n
Otherwise, delete the resources created in each step:
kubectl delete namespace myapp\nkubectl delete namespace authorino\nkubectl delete clusterrole authorino-apikey\nkubectl delete namespace keycloak\n
To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:
kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
- In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options. ↩
- cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances. ↩
- For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification. ↩
"},{"location":"authorino-operator/","title":"Authorino Operator","text":"A Kubernetes Operator to manage Authorino instances.
"},{"location":"authorino-operator/#installation","title":"Installation","text":"The Operator can be installed by applying the manifests to the Kubernetes cluster or using Operator Lifecycle Manager (OLM)
"},{"location":"authorino-operator/#applying-the-manifests-to-the-cluster","title":"Applying the manifests to the cluster","text":" - Create the namespace for the Operator
kubectl create namespace authorino-operator\n
- Install the Operator manifests
make install\n
- Deploy the Operator
make deploy\n
Tip: Deploy a custom image of the Operator To deploy an image of the Operator other than the default quay.io/kuadrant/authorino-operator:latest
, specify it by setting the OPERATOR_IMAGE
parameter. E.g.: make deploy OPERATOR_IMAGE=authorino-operator:local\n
"},{"location":"authorino-operator/#installing-via-olm","title":"Installing via OLM","text":"To install the Operator using the Operator Lifecycle Manager, you need to make the Operator CSVs available in the cluster by creating a CatalogSource
resource.
The bundle and catalog images of the Operator are available in Quay.io:
- Bundle: quay.io/kuadrant/authorino-operator-bundle
- Catalog: quay.io/kuadrant/authorino-operator-catalog
- Create the namespace for the Operator
kubectl create namespace authorino-operator\n
- Create the CatalogSource resource pointing to one of the images in the Operator's catalog repo:
kubectl -n authorino-operator apply -f -<<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n name: operatorhubio-catalog\n namespace: authorino-operator\nspec:\n sourceType: grpc\n image: quay.io/kuadrant/authorino-operator-catalog:latest\n displayName: Authorino Operator\nEOF\n
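The CatalogSource only makes the Operator available in the cluster; with OLM, the installation is typically completed by creating an OperatorGroup and a Subscription pointing at that catalog. A minimal sketch follows (the package name and channel are assumptions, not taken from this guide, and may differ in your catalog):
kubectl -n authorino-operator apply -f -<<EOF\napiVersion: operators.coreos.com/v1\nkind: OperatorGroup\nmetadata:\n name: authorino-operator\nspec: {} # empty spec: watch all namespaces\n---\napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: authorino-operator\nspec:\n channel: stable # assumed channel name\n name: authorino-operator # assumed package name in the catalog\n source: operatorhubio-catalog\n sourceNamespace: authorino-operator\nEOF\n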
"},{"location":"authorino-operator/#requesting-an-authorino-instance","title":"Requesting an Authorino instance","text":"Once the Operator is up and running, you can request instances of Authorino by creating Authorino
CRs. E.g.:
kubectl -n default apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n listener:\n tls:\n enabled: false\n oidcServer:\n tls:\n enabled: false\nEOF\n
"},{"location":"authorino-operator/#the-authorino-custom-resource-definition-crd","title":"The Authorino
Custom Resource Definition (CRD)","text":"API to install, manage and configure Authorino authorization services.
Each Authorino
Custom Resource (CR) represents an instance of Authorino deployed to the cluster. The Authorino Operator will reconcile the state of the Kubernetes Deployment and associated resources, based on the state of the CR.
"},{"location":"authorino-operator/#api-specification","title":"API Specification","text":"Field Type Description Required/Default spec AuthorinoSpec Specification of the Authorino deployment. Required"},{"location":"authorino-operator/#authorinospec","title":"AuthorinoSpec","text":"Field Type Description Required/Default clusterWide Boolean Sets the Authorino instance's watching scope \u2013 cluster-wide or namespaced. Default: true
(cluster-wide) authConfigLabelSelectors String Label selectors used by the Authorino instance to filter AuthConfig
-related reconciliation events. Default: empty (all AuthConfigs are watched) secretLabelSelectors String Label selectors used by the Authorino instance to filter Secret
-related reconciliation events (API key and mTLS authentication methods). Default: authorino.kuadrant.io/managed-by=authorino
supersedingHostSubsets Boolean Enable/disable allowing AuthConfigs to supersede strict subsets of hosts already taken. Default: false
replicas Integer Number of replicas desired for the Authorino instance. Values greater than 1 enable leader election in the Authorino service, where the leader updates the statuses of the AuthConfig
CRs). Default: 1 evaluatorCacheSize Integer Cache size (in megabytes) of each Authorino evaluator (when enabled in an AuthConfig
). Default: 1 image String Authorino image to be deployed (for dev/testing purpose only). Default: quay.io/kuadrant/authorino:latest
imagePullPolicy String Sets the imagePullPolicy of the Authorino Deployment (for dev/testing purpose only). Default: k8s default logLevel String Defines the level of log you want to enable in Authorino (debug
, info
and error
). Default: info
logMode String Defines the log mode in Authorino (development
or production
). Default: production
listener Listener Specification of the authorization service (gRPC interface). Required oidcServer OIDCServer Specification of the OIDC service. Required tracing Tracing Configuration of the OpenTelemetry tracing exporter. Optional metrics Metrics Configuration of the metrics server (port, level). Optional healthz Healthz Configuration of the health/readiness probe (port). Optional volumes VolumesSpec Additional volumes to be mounted in the Authorino pods. Optional"},{"location":"authorino-operator/#listener","title":"Listener","text":"Configuration of the authorization server \u2013 gRPC and raw HTTP interfaces
Field Type Description Required/Default port Integer Port number of authorization server (gRPC interface). DEPRECATEDUse ports
instead ports Ports Port numbers of the authorization server (gRPC and raw HTTPinterfaces). Optional tls TLS TLS configuration of the authorization server (GRPC and HTTP interfaces). Required timeout Integer Timeout of external authorization request (in milliseconds), controlled internally by the authorization server. Default: 0
(disabled)"},{"location":"authorino-operator/#oidcserver","title":"OIDCServer","text":"Configuration of the OIDC Discovery server for Festival Wristband tokens.
Field Type Description Required/Default port Integer Port number of OIDC Discovery server for Festival Wristband tokens. Default: 8083
tls TLS TLS configuration of the OIDC Discovery server for Festival Wristband tokens Required"},{"location":"authorino-operator/#tls","title":"TLS","text":"TLS configuration of server. Appears in listener
and oidcServer
.
Field Type Description Required/Default enabled Boolean Whether TLS is enabled or disabled for the server. Default: true
certSecretRef LocalObjectReference The reference to the secret that contains the TLS certificates tls.crt
and tls.key
. Required when enabled: true
"},{"location":"authorino-operator/#ports","title":"Ports","text":"Port numbers of the authorization server.
Field Type Description Required/Default grpc Integer Port number of the gRPC interface of the authorization server. Set to 0 to disable this interface. Default: 50001
http Integer Port number of the raw HTTP interface of the authorization server. Set to 0 to disable this interface. Default: 5001
"},{"location":"authorino-operator/#tracing","title":"Tracing","text":"Configuration of the OpenTelemetry tracing exporter.
Field Type Description Required/Default endpoint String Full endpoint of the OpenTelemetry tracing collector service (e.g. http://jaeger:14268/api/traces). Required tags Map Key-value map of fixed tags to add to all OpenTelemetry traces emitted by Authorino. Optional insecure Boolean Enable/disable insecure connection to the tracing endpoint Default: false
"},{"location":"authorino-operator/#metrics","title":"Metrics","text":"Configuration of the metrics server.
Field Type Description Required/Default port Integer Port number of the metrics server. Default: 8080
deep Boolean Enable/disable metrics at the level of each evaluator config (if requested in the AuthConfig
) exported by the metrics server. Default: false
"},{"location":"authorino-operator/#healthz","title":"Healthz","text":"Configuration of the health/readiness probe (port).
Field Type Description Required/Default port Integer Port number of the health/readiness probe. Default: 8081
"},{"location":"authorino-operator/#volumesspec","title":"VolumesSpec","text":"Additional volumes to project in the Authorino pods. Useful for validation of TLS self-signed certificates of external services known to have to be contacted by Authorino at runtime.
Field Type Description Required/Default items []VolumeSpec List of additional volume items to project. Optional defaultMode Integer Mode bits used to set permissions on the files. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. Optional"},{"location":"authorino-operator/#volumespec","title":"VolumeSpec","text":"Field Type Description Required/Default name String Name of the volume and volume mount within the Deployment. It must be unique in the CR. Optional mountPath String Absolute path where to mount all the items. Required configMaps []String List of of Kubernetes ConfigMap names to mount. Required exactly one of: confiMaps
, secrets
. secrets []String List of of Kubernetes Secret names to mount. Required exactly one of: confiMaps
, secrets
. items []KeyToPath Mount details for selecting specific ConfigMap or Secret entries. Optional"},{"location":"authorino-operator/#full-example","title":"Full example","text":"apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n name: authorino\nspec:\n clusterWide: true\n authConfigLabelSelectors: environment=production\n secretLabelSelectors: authorino.kuadrant.io/component=authorino,environment=production\n\n replicas: 2\n\n evaluatorCacheSize: 2 # mb\n\n image: quay.io/kuadrant/authorino:latest\n imagePullPolicy: Always\n\n logLevel: debug\n logMode: production\n\n listener:\n ports:\n grpc: 50001\n http: 5001\n tls:\n enabled: true\n certSecretRef:\n name: authorino-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n oidcServer:\n port: 8083\n tls:\n enabled: true\n certSecretRef:\n name: authorino-oidc-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n metrics:\n port: 8080\n deep: true\n\n volumes:\n items:\n\n - name: keycloak-tls-cert\n mountPath: /etc/ssl/certs\n configMaps:\n - keycloak-tls-cert\n items: # details to mount the k8s configmap in the authorino pods\n - key: keycloak.crt\n path: keycloak.crt\n defaultMode: 420\n
"},{"location":"limitador/","title":"Limitador","text":"Limitador is a generic rate-limiter written in Rust. It can be used as a library, or as a service. The service exposes HTTP endpoints to apply and observe limits. Limitador can be used with Envoy because it also exposes a grpc service, on a different port, that implements the Envoy Rate Limit protocol (v3).
- Getting started
- How it works
- Configuration
- Development
- Testing Environment
- Kubernetes
- Contributing
- License
Limitador is under active development, and its API has not been stabilized yet.
"},{"location":"limitador/#getting-started","title":"Getting started","text":" - Rust library
- Server
"},{"location":"limitador/#rust-library","title":"Rust library","text":"Add this to your Cargo.toml
:
[dependencies]\nlimitador = { version = \"0.3.0\" }\n
For more information, see the README
of the crate
"},{"location":"limitador/#server","title":"Server","text":"Run with Docker (replace latest
with the version you want):
docker run --rm --net=host -it quay.io/kuadrant/limitador:v1.0.0\n
Run locally:
cargo run --release --bin limitador-server -- --help\n
Refer to the help message on how to start up the server. More information are available in the server's README.md
"},{"location":"limitador/#development","title":"Development","text":""},{"location":"limitador/#build","title":"Build","text":"cargo build\n
"},{"location":"limitador/#run-the-tests","title":"Run the tests","text":"Some tests need a redis deployed in localhost:6379
. You can run it in Docker with:
docker run --rm -p 6379:6379 -it redis\n
Then, run the tests:
cargo test --all-features\n
or you can run tests disabling the \"redis storage\" feature:
cd limitador; cargo test --no-default-features\n
"},{"location":"limitador/#contributing","title":"Contributing","text":"Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"limitador/#license","title":"License","text":"Apache 2.0 License
"},{"location":"limitador/doc/how-it-works/","title":"How it works","text":""},{"location":"limitador/doc/how-it-works/#how-it-works","title":"How it works","text":"Limitador ensures that the most restrictive limit configuration will apply.
Limitador will try to match each incoming descriptor against the conditions and variables of the counters in the same namespace. The namespace for the descriptors is defined by the domain field, whereas the rate limit configuration uses the namespace field. For each matching counter, the counter is increased and the limits checked.
One example to illustrate:
Let's say we have 1 rate limit configuration (one counter per config):
conditions: [\"KEY_A == 'VALUE_A'\"]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Limitador receives one descriptor with two entries:
domain: example.org\ndescriptors:\n\n - entries:\n - KEY_A: VALUE_A\n - OTHER_KEY: OTHER_VALUE\n
The counter's condition will match. Then, the counter will be increased and the limit checked. If the limit is exceeded, the request will be rejected with 429 Too Many Requests
, otherwise accepted.
Note that the counter is being activated even though it does not match all the entries of the descriptor. The same rule applies for the variables field.
Currently, the implementation of condition only allows for the equal (==) and not equal (!=) operators. More operators will be implemented based on the use cases for them.
The variables field is a list of keys. The matching rule simply requires that the descriptor contains entries with those same keys. If variables is variables: [A, B, C], a descriptor matches if it has at least three entries with those same A, B, C keys.
A few examples to illustrate.
Having the following descriptors:
domain: example.org\ndescriptors:\n\n - entries:\n - KEY_A: VALUE_A\n - OTHER_KEY: OTHER_VALUE\n
the following counters would not be activated.
conditions: [\"KEY_B == 'VALUE_B'\"]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Reason: conditions key does not exist conditions:\n\n - \"KEY_A == 'VALUE_A'\"\n - \"OTHER_KEY == 'WRONG_VALUE'\"\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
Reason: not all the conditions match conditions: []\nmax_value: 1\nseconds: 60\nvariables: [\"MY_VAR\"]\nnamespace: example.org\n
Reason: the variable name does not exist conditions: [\"KEY_B == 'VALUE_B'\"]\nmax_value: 1\nseconds: 60\nvariables: [\"MY_VAR\"]\nnamespace: example.org\n
Reason: Both variables and conditions must match. In this particular case, only conditions match"},{"location":"limitador/doc/topologies/","title":"Deployment topologies","text":""},{"location":"limitador/doc/topologies/#in-memory","title":"In-memory","text":""},{"location":"limitador/doc/topologies/#redis","title":"Redis","text":""},{"location":"limitador/doc/topologies/#redis-active-active-storage","title":"Redis active-active storage","text":"The RedisLabs version of Redis supports active-active replication. Limitador is compatible with that deployment mode, but there are a few things to take into account regarding limit accuracy.
"},{"location":"limitador/doc/topologies/#considerations","title":"Considerations","text":"With an active-active deployment, the data needs to be replicated between instances. An update in an instance takes a short time to be reflected in the other. That time lag depends mainly on the network speed between the Redis instances, and it affects the accuracy of the rate-limiting performed by Limitador because it can go over limits while the updates of the counters are being replicated.
The impact of that greatly depends on the use case. With limits of a few seconds, and a low number of hits, we could easily go over limits. On the other hand, if we have defined limits with a high number of hits and a long period, the effect will be basically negligible. For example, if we define a limit of one hour, and we know that the data takes around one second to be replicated, the accuracy loss is going to be negligible.
"},{"location":"limitador/doc/topologies/#set-up","title":"Set up","text":"In order to try active-active replication, you can follow this tutorial from RedisLabs.
"},{"location":"limitador/doc/topologies/#disk","title":"Disk","text":"Disk storage using RocksDB. Counters are held on disk (persistent).
"},{"location":"limitador/doc/migrations/conditions/","title":"New condition syntax","text":"With limitador-server
version 1.0.0
(and the limitador
crate version 0.3.0
), the syntax for condition
s within limit
definitions has changed.
"},{"location":"limitador/doc/migrations/conditions/#changes","title":"Changes","text":""},{"location":"limitador/doc/migrations/conditions/#the-new-syntax","title":"The new syntax","text":"The new syntax formalizes what part of an expression is the identifier and which is the value to test against. Identifiers are simple string value, while string literals are to be demarcated by single quotes ('
) or double quotes (\"
) so that foo == \" bar\"
now makes it explicit that the value is to be prefixed with a space character.
A few remarks:
- Only
string
values are supported, as that's what they really are - There is no escape character sequence supported in string literals
- A new operator has been added,
!=
"},{"location":"limitador/doc/migrations/conditions/#the-issue-with-the-deprecated-syntax","title":"The issue with the deprecated syntax","text":"The previous syntax wouldn't differentiate between values and the identifier, so that foo == bar
was valid. In this case foo
was the identifier of the variable, while bar
was the value to evaluate it against. Whitespaces before and after the operator ==
would be equally important. SO that foo == bar
would test for a foo
variable being equal to bar
where the trailing whitespace after the identifier, and the one prefixing the value, would have been evaluated.
"},{"location":"limitador/doc/migrations/conditions/#server-binary-users","title":"Server binary users","text":"The server still allows for the deprecated syntax, but warns about its usage. You can easily migrate your limits file, using the following command:
limitador-server --validate old_limits.yaml > updated_limits.yaml\n
Which should output Deprecated syntax for conditions corrected!
to stderr
while stdout
would be the limits using the new syntax. It is recommended you manually verify the resulting LIMITS_FILE
.
"},{"location":"limitador/doc/migrations/conditions/#crate-users","title":"Crate users","text":"A feature lenient_conditions
has been added, which lets you use the syntax used in previous version of the crate. The function limitador::limit::check_deprecated_syntax_usages_and_reset()
lets you verify if the deprecated syntax has been used as limit::Limit
s are created with their condition strings using the deprecated syntax.
"},{"location":"limitador/doc/server/configuration/","title":"Limitador configuration","text":""},{"location":"limitador/doc/server/configuration/#command-line-configuration","title":"Command line configuration","text":"The preferred way of starting and configuring the Limitador server is using the command line:
Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n memory Counters are held in Limitador (ephemeral)\n disk Counters are held on disk (persistent)\n redis Uses Redis to store counters\n redis_cached Uses Redis to store counters, with an in-memory cache\n\nArguments:\n <LIMITS_FILE> The limit file to use\n\nOptions:\n -b, --rls-ip <ip>\n The IP to listen on for RLS [default: 0.0.0.0]\n -p, --rls-port <port>\n The port to listen on for RLS [default: 8081]\n -B, --http-ip <http_ip>\n The IP to listen on for HTTP [default: 0.0.0.0]\n -P, --http-port <http_port>\n The port to listen on for HTTP [default: 8080]\n -l, --limit-name-in-labels\n Include the Limit Name in prometheus label\n -v...\n Sets the level of verbosity\n --tracing-endpoint <tracing_endpoint>\n The endpoint for the tracing service\n --validate\n Validates the LIMITS_FILE and exits\n -H, --rate-limit-headers <rate_limit_headers>\n Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n --grpc-reflection-service\n Enables gRPC server reflection service\n -h, --help\n Print help\n -V, --version\n Print version\n
The values used are authoritative over any environment variables independently set.
"},{"location":"limitador/doc/server/configuration/#limit-definitions","title":"Limit definitions","text":"The LIMITS_FILE
provided is the source of truth for all the limits that will be enforced. The file location will be monitored by the server for any changes and be hot reloaded. If the changes are invalid, they will be ignored on hot reload, or the server will fail to start.
"},{"location":"limitador/doc/server/configuration/#the-limits_files-format","title":"The LIMITS_FILE
's format","text":"When starting the server, you point it to a LIMITS_FILE
, which is expected to be a yaml file with an array of limit
definitions, with the following format:
---\n\"$schema\": http://json-schema.org/draft-04/schema#\ntype: object\nproperties:\n name:\n type: string\n namespace:\n type: string\n seconds:\n type: integer\n max_value:\n type: integer\n conditions:\n type: array\n items:\n\n - type: string\n variables:\n type: array\n items:\n - type: string\nrequired:\n - namespace\n - seconds\n - max_value\n - conditions\n - variables\n
Here is an example of such a limit definition:
namespace: example.org\nmax_value: 10\nseconds: 60\nconditions:\n\n - \"req.method == 'GET'\"\nvariables:\n - user_id\n
namespace
- namespace: namespaces the limit; will generally be the domain, see here
- seconds: is the duration for which the limit applies, in seconds: e.g. 60 is a span of time of one minute
- max_value: is the actual limit, e.g. 100 would limit to 100 requests
- name: lets the user optionally name the limit
- variables: is an array of variables, which once resolved, will be used to qualify counters for the limit, e.g. api_key to limit per api keys
- conditions: is an array of conditions, which once evaluated will decide whether to apply the limit or not
syntax","text":"Each condition
is an expression producing a boolean value (true
or false
). All conditions
must evaluate to true
for the limit
to be applied on a request.
Expressions follow the following syntax: $IDENTIFIER $OP $STRING_LITERAL
, where:
$IDENTIFIER
will be used to resolve the value at evaluation time, e.g. role
$OP
is an operator, either ==
or !=
$STRING_LITERAL
is a literal string value, \"
or '
demarcated, e.g. \"admin\"
So that role != \"admin\"
would apply the limit on request from all users, but admin
's.
"},{"location":"limitador/doc/server/configuration/#counter-storages","title":"Counter storages","text":"Limitador will load all the limit
definitions from the LIMITS_FILE
and keep these in memory. To enforce these limits, Limitador needs to track requests in the form of counters. There would be at least one counter per limit, but that number grows when variables
are used to qualify counters per some arbitrary values.
"},{"location":"limitador/doc/server/configuration/#memory","title":"memory
","text":"As the name implies, Limitador will keep all counters in memory. This yields the best results in terms of latency as well as accuracy. By default, only up to 1000
\"concurrent\" counters will be kept around, evicting the oldest entries. \"Concurrent\" in this context means counters that need to exist at the \"same time\", based of the period of the limit, as \"expired\" counters are discarded.
This storage is ephemeral, as if the process is restarted, all the counters are lost and effectively \"reset\" all the limits as if no traffic had been rate limited, which can be fine for short-lived limits, less for longer-lived ones.
"},{"location":"limitador/doc/server/configuration/#redis","title":"redis
","text":"When you want persistence of your counters, such as for disaster recovery or across restarts, using redis
will store the counters in a redis instance using the provided URL
. Increments to individual counters is made within redis itself, providing accuracy over these, races tho can occur when multiple Limitador servers are used against a single redis and using \"stacked\" limits (i.e. over different periods). Latency is also impacted, as it results in one additional hop to talk to redis and maintain the counters.
TLS Support
Connect to a redis instance using the rediss://
URL scheme.
To enable insecure mode, append #insecure
at the end of the URL. For example:
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\"\n
Authentication
To enable authentication, use the username and password properties of the URL scheme. For example:
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\"\n
when the username is omitted, redis assumes default
user. For example:
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\"\n
Usage
Uses Redis to store counters\n\nUsage: limitador-server <LIMITS_FILE> redis <URL>\n\nArguments:\n <URL> Redis URL to use\n\nOptions:\n -h, --help Print help\n
"},{"location":"limitador/doc/server/configuration/#redis_cached","title":"redis_cached
","text":"In order to avoid some communication overhead to redis, redis_cached
adds an in memory caching layer within the Limitador servers. This lowers the latency, but sacrifices some accuracy as it will not only cache counters, but also coalesce counters updates to redis over time. See this configuration option for more information.
TLS Support
Connect to a redis instance using the rediss://
URL scheme.
To enable insecure mode, append #insecure
at the end of the URL. For example:
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\"\n
Authentication
To enable authentication, use the username and password properties of the URL scheme. For example:
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\"\n
when the username is omitted, redis assumes default
user. For example:
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\"\n
Usage
Uses Redis to store counters, with an in-memory cache\n\nUsage: limitador-server <LIMITS_FILE> redis_cached [OPTIONS] <URL>\n\nArguments:\n <URL> Redis URL to use\n\nOptions:\n --batch-size <batch> Size of entries to flush in as single flush [default: 100]\n --flush-period <flush> Flushing period for counters in milliseconds [default: 1000]\n --max-cached <max> Maximum amount of counters cached [default: 10000]\n --response-timeout <timeout> Timeout for Redis commands in milliseconds [default: 350]\n -h, --help Print help\n
"},{"location":"limitador/doc/server/configuration/#disk","title":"disk
","text":"Disk storage using RocksDB. Counters are held on disk (persistent).
Counters are held on disk (persistent)\n\nUsage: limitador-server <LIMITS_FILE> disk [OPTIONS] <PATH>\n\nArguments:\n <PATH> Path to counter DB\n\nOptions:\n --optimize <OPTIMIZE> Optimizes either to save disk space or higher throughput [default: throughput] [possible values: throughput, disk]\n -h, --help Print help\n
For an in-depth coverage of the different topologies supported and how they affect the behavior, see the topologies' document.
"},{"location":"limitador/doc/server/configuration/#configuration-using-environment-variables","title":"Configuration using environment variables","text":"The Limitador server has some options that can be configured with environment variables. These will override the default values the server uses. Any argument used when starting the server will prevail over the environment variables.
"},{"location":"limitador/doc/server/configuration/#envoy_rls_host","title":"ENVOY_RLS_HOST
","text":" - Host where the Envoy RLS server listens.
- Optional. Defaults to
\"0.0.0.0\"
. - Format:
string
.
"},{"location":"limitador/doc/server/configuration/#envoy_rls_port","title":"ENVOY_RLS_PORT
","text":" - Port where the Envoy RLS server listens.
- Optional. Defaults to
8081
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#http_api_host","title":"HTTP_API_HOST
","text":" - Host where the HTTP server listens.
- Optional. Defaults to
\"0.0.0.0\"
. - Format:
string
.
"},{"location":"limitador/doc/server/configuration/#http_api_port","title":"HTTP_API_PORT
","text":" - Port where the HTTP API listens.
- Optional. Defaults to
8080
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#limits_file","title":"LIMITS_FILE
","text":" - YAML file that contains the limits to create when Limitador boots. If the limits specified already have counters associated, Limitador will not delete them. Changes to the file will be picked up by the running server.
- Required. No default
- Format:
string
, file path.
"},{"location":"limitador/doc/server/configuration/#limit_name_in_prometheus_labels","title":"LIMIT_NAME_IN_PROMETHEUS_LABELS
","text":" - Enables using limit names as labels in Prometheus metrics. This is disabled by default because for a few limits it should be fine, but it could become a problem when defining lots of limits. See the caution note in the Prometheus docs
- Optional. Disabled by default.
- Format:
bool
, set to \"1\"
to enable.
"},{"location":"limitador/doc/server/configuration/#tracing_endpoint","title":"TRACING_ENDPOINT
","text":" - The endpoint of the OTLP tracing collector (scheme://host:port).
- Optional. Default to
\"\"
(tracing disabled) - Format:
string
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_enabled","title":"REDIS_LOCAL_CACHE_ENABLED
","text":" - Enables a storage implementation that uses Redis, but also caches some data in memory. The idea is to improve throughput and latencies by caching the counters in memory to reduce the number of accesses to Redis. To achieve that, this mode sacrifices some rate-limit accuracy. This mode does two things:
- Batches counter updates. Instead of updating the counters on every request, it updates them in memory and commits them to Redis in batches. The flushing interval can be configured with the
REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS
env. The trade-off is that when running several instances of Limitador, other instances will not become aware of the counter updates until they're committed to Redis. - Caches counters. Instead of fetching the value of a counter every time it's needed, the value is cached for a configurable period. The trade-off is that when running several instances of Limitador, an instance will not become aware of the counter updates other instances do while the value is cached. When a counter is already at 0 (limit exceeded), it's cached until it expires in Redis. In this case, no matter what other instances do, we know that the quota will not be reestablished until the key expires in Redis, so in this case, rate-limit accuracy is not affected. When a counter has still some quota remaining the situation is different, that's why we can tune for how long it will be cached. The formula is as follows: MIN(ttl_in_redis/
REDIS_LOCAL_CACHE_TTL_RATIO_CACHED_COUNTERS
, REDIS_LOCAL_CACHE_MAX_TTL_CACHED_COUNTERS_MS
). For example, let's image that the current TTL (time remaining until the limit resets) in Redis for a counter is 10 seconds, and we set the ratio to 2, and the max time for 30s. In this case, the counter will be cached for 5s (min(10/2, 30)). During those 5s, Limitador will not fetch the value of that counter from Redis, so it will answer faster, but it will also miss the updates done by other instances, so it can go over the limits in that 5s interval.
- Optional. Disabled by default.
- Format: set to \"1\" to enable.
- Note: \"REDIS_URL\" needs to be set.
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_flushing_period_ms","title":"REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS
","text":" - Used to configure the maximum flushing period. See
REDIS_LOCAL_CACHE_ENABLED
. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1
. - Optional. Defaults to
1000
. - Format:
integer
. Duration in milliseconds.
"},{"location":"limitador/doc/server/configuration/#redis_local_cache_batch_size","title":"REDIS_LOCAL_CACHE_BATCH_SIZE
","text":" - Used to configure the maximum number of counters to update in a flush. See
REDIS_LOCAL_CACHE_ENABLED
. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1
. - Optional. Defaults to
100
. - Format:
integer
.
"},{"location":"limitador/doc/server/configuration/#redis_url","title":"REDIS_URL
","text":" - Redis URL. Required only when you want to use Redis to store the limits.
- Optional. By default, Limitador stores the limits in memory and does not require Redis.
- Format:
string
, URL in the format of \"redis://127.0.0.1:6379\"
.
"},{"location":"limitador/doc/server/configuration/#rust_log","title":"RUST_LOG
","text":" - Defines the log level.
- Optional. Defaults to
\"error\"
. - Format:
enum
: \"debug\"
, \"error\"
, \"info\"
, \"warn\"
, or \"trace\"
.
"},{"location":"limitador/doc/server/configuration/#rate_limit_headers","title":"RATE_LIMIT_HEADERS
","text":" - Enables rate limit response headers. Only supported by the RLS server.
- Optional. Defaults to
\"NONE\"
. - Must be one of:
\"NONE\"
- Does not add any additional headers to the http response. \"DRAFT_VERSION_03\"
. Adds response headers per https://datatracker.ietf.org/doc/id/draft-polli-ratelimit-headers-03.html
"},{"location":"limitador/limitador/","title":"Limitador (library)","text":"An embeddable rate-limiter library supporting in-memory, Redis and disk data stores.
For the complete documentation of the crate's API, please refer to docs.rs
"},{"location":"limitador/limitador/#features","title":"Features","text":" redis_storage
: support for using Redis as the data storage backend. disk_storage
: support for using RocksDB as a local disk storage backend. lenient_conditions
: support for the deprecated syntax of Condition
s default
: redis_storage
.
"},{"location":"limitador/limitador-server/","title":"Limitador (server)","text":"By default, Limitador starts the HTTP server in localhost:8080
, and the grpc service that implements the Envoy Rate Limit protocol in localhost:8081
. That can be configured with these ENVs: ENVOY_RLS_HOST
, ENVOY_RLS_PORT
, HTTP_API_HOST
, and HTTP_API_PORT
.
Or using the command line arguments:
Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n memory Counters are held in Limitador (ephemeral)\n disk Counters are held on disk (persistent)\n redis Uses Redis to store counters\n redis_cached Uses Redis to store counters, with an in-memory cache\n\nArguments:\n <LIMITS_FILE> The limit file to use\n\nOptions:\n -b, --rls-ip <ip>\n The IP to listen on for RLS [default: 0.0.0.0]\n -p, --rls-port <port>\n The port to listen on for RLS [default: 8081]\n -B, --http-ip <http_ip>\n The IP to listen on for HTTP [default: 0.0.0.0]\n -P, --http-port <http_port>\n The port to listen on for HTTP [default: 8080]\n -l, --limit-name-in-labels\n Include the Limit Name in prometheus label\n -v...\n Sets the level of verbosity\n --tracing-endpoint <tracing_endpoint>\n The endpoint for the tracing service\n --validate\n Validates the LIMITS_FILE and exits\n -H, --rate-limit-headers <rate_limit_headers>\n Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n -h, --help\n Print help\n -V, --version\n Print version\n
When using environment variables, these will override the defaults. While environment variable are themselves overridden by the command line arguments provided. See the individual STORAGES
help for more options relative to each of the storages.
The OpenAPI spec of the HTTP service is here.
Limitador has to be started with a YAML file that has some limits defined. There's an example file that allows 10 requests per minute and per user_id
when the HTTP method is \"GET\"
and 5 when it is a \"POST\"
. You can run it with Docker (replace latest
with the version you want):
docker run --rm --net=host -it -v $(pwd)/examples/limits.yaml:/home/limitador/my_limits.yaml:ro quay.io/kuadrant/limitador:latest limitador-server /home/limitador/my_limits.yaml\n
You can also use the YAML file when running locally:
cargo run --release --bin limitador-server ./examples/limits.yaml\n
If you want to use Limitador with Envoy, there's a minimal Envoy config for testing purposes here. The config forwards the \"userid\" header and the request method to Limitador. It assumes that there's an upstream API deployed on port 1323. You can use echo, for example.
Limitador has several options that can be configured via ENV. This doc specifies them.
"},{"location":"limitador/limitador-server/#limits-storage","title":"Limits storage","text":"Limitador can store its limits and counters in-memory, disk or in Redis. In-memory is faster, but the limits are applied per instance. When using Redis, multiple instances of Limitador can share the same limits, but it's slower.
"},{"location":"limitador/limitador-server/kubernetes/","title":"Kubernetes","text":"The purpose of this documentation is to deploy a sample application published via AWS ELB, that will be ratelimited at infrastructure level, thanks to the use the envoyproxy sidecar container, that will be in charge of contacting to a ratelimit service (limitador), that will allow the request (or not) if it is within the permitted limits.
There are mainly two recommended way of using limitador in kubernetes:
- There is an ingress based on envoyproxy that contacts with limitador ratelimit service before forwarding (or not) the request to the application
- There is an envoyproxy sidecar container living in the application pod that contacts with limitador ratelimit service before forwarding (or not) the request to the main application container in the same pod
In this example it will be described the second scenario (where there is an application with an envoyproxy sidecar container contacting to limitador service).
NOTE If you don't want to manually manage the sidecar container definitions on your deployments (harcoding the container spec, loading the envoy configuration from a configmap that requires a pod restart to reload possibles configuration changes...), you can use marin3r, a light weight envoy control plane that allows you to inject envoyproxy sidecar containers and dynamically consume configs from Kubernetes custom resources.
This is the network diagram of the deployed example:
"},{"location":"limitador/limitador-server/kubernetes/#components","title":"Components","text":"In order to that that ratelimit test, you need to deploy a few components. Some of them are mandatory, and a few are optional:
"},{"location":"limitador/limitador-server/kubernetes/#mandatory","title":"Mandatory","text":" - Application (a sample application deployment called
kuard
): - App has an
envoyproxy
sidecar container with its configuration file in a configmap, composed by: - Cluster
kuard
points to main application container (127.0.0.1:8080
) - Cluster
kuard_ratelimit
points to limitador headless service (limitador:8081
) - Listener HTTP points to envoyproxy sidecar (
0.0.0.0:38080
) - When envoy contacts with the ratelimit service, you can define a timeout, and if there is no response within that timeout (because ratelimit is overloaded taking more time to process the request, or because rateliit service is down), you can choose from envoy to deny the request or pass it to the application. In this case, there is set a 1s timeout, and if there is no answer in this 1 second, request is passed to the application (
failure_mode_deny: false
), so we guarantee that the maximum overhead added by a non working ratelimit service is 1 extra second to the final response time.
-
App service published with type: LoadBalancer
, which creates a AWS ELB. This service has an annotation to enable proxy protocol on the AWS Load balancer in order to be able to keep the real client IP at envoy level (instead of the k8s node private IP), so it can be used to ratelimit per each real client IP if desired.
-
Ratelimit application (a deployment called limitador
):
- Limitador Configmap with limits definition (1000 rps per hostname).
-
Limitador headless service published on limitador:8081
. It is important to use a headless service in order to balance correctly the traffic between limitador pods, otherwise GRPC connections are not well balanced.
-
Redis database to persist ratelimit configuration:
- Redis service
- Redis statefulset with a persistent volume
"},{"location":"limitador/limitador-server/kubernetes/#optional","title":"Optional","text":" - Centos pod:
- Used to executed
hey
tool benchmarks from the cluster, so we ensure network latency does not affect the results. Actually, to achieve better results, this pod should be on another cluster (to not share the network between client and network) and be placed on the same Region (to reduce latency). The client could be a bottle neck for the performance test. - This centos is going to public AWS ELB to access the app, so simulating it is a normal client from the same Region
- Prometheus monitoring and grafana dashboard resources
"},{"location":"limitador/limitador-server/kubernetes/#k8s-deployment","title":"K8s deployment","text":" -
Deploy the redis instance that will keep the limits for different limitador pods:
kubectl apply -f redis-service.yaml\nkubectl apply -f redis-statefulset.yaml\n
-
Deploy limitador application. It is important to create the configmap with limitador limits before the deployment, in order to load it from limitador pods. At the moment, if you update the limits configmap you need to restart the pods. Additionally, limitador has an API in order to load limits dynamically, but for simplicity for this test a configmap has been used:
kubectl apply -f limitador-config-configmap.yaml\nkubectl apply -f limitador-service.yaml\nkubectl apply -f limitador-deployment.yaml\n
-
Deploy sample kuard application with the envoyproxy sidecar container (if you do any change on the envoy configmap, remember you need to restart app pods in order to reload the config):
kubectl apply -f kuard-envoy-config-configmap.yaml\nkubectl apply -f kuard-service.yaml\nkubectl apply -f kuard-deployment.yaml\n
-
At this point you shoud see all pods running, and kuard pods should have 2 containers (the main kuard container, and the envoyproxy sidecar container):
\u25b6 kubectl get pods\nNAME READY STATUS RESTARTS AGE\nkuard-f859bb896-gmzxn 2/2 Running 0 7m\nkuard-f859bb896-z95w8 2/2 Running 0 7m\nlimitador-68d494f54c-qv996 1/1 Running 0 8m\nlimitador-68d494f54c-zzmhn 1/1 Running 0 8m\nredis-0 1/1 Running 0 9m\n
-
Now you should be able to access to kuard application using the load balancer DNS name:
\u25b6 kubectl get service kuard\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nkuard LoadBalancer 172.30.117.198 a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com 80:31450/TCP 4m\n
-
If you go to the browser and paste the EXTERNAL-IP
, your request will follow the next workflow:
- The requests will go from your local machine through internet to the public AWS ELB where the app is published
- Then it will go to the
NodePort
of your k8s cluster nodes - Once on a k8s node, it will go to kuard
Service
Virtual IP, and will arrive to an envoyproxy sidecar container inside kuard pod - Envoyproxy sidecar container will contact with limitador headless
Service
, to authorize the requests or not: - If the request is authorized (within the configured limits), it will send the request to the app container (
0.0.0.0:8080
) in the same pod, and request will end up with a HTTP 200
response - If the request is limited (beyond the limits), request will end up with
HTTP 429
response
"},{"location":"limitador/limitador-server/kubernetes/#monitoring","title":"Monitoring","text":"Both envoyproxy
sidecar and limitador
applications include built-in prometheus metrics.
"},{"location":"limitador/limitador-server/kubernetes/#prometheus","title":"Prometheus","text":"In order to scrape that metrics within a prometheus-operator deployed in the cluster, you need to create a PodMonitor
resource for every application:
kubectl apply -f kuard-podmonitor.yaml\nkubectl apply -f limitador-podmonitor.yaml\n
"},{"location":"limitador/limitador-server/kubernetes/#grafana-dashboard","title":"Grafana dashboard","text":"Then, if you have grafana deployed in the cluster, you can import a Kuadrant Limitador grafana dashboard that we have prepared, which includes:
- Kuard envoyproxy sidecar metrics (globally and per pod)
- Limitador metrics (globally and per pod)
- And for every deployed component (limitador, kuard, redis):
- Number of pods (total, available, unavaible, pod restarts...)
- CPU usage per pod
- Memory usage per pod
- Network usage per pod
"},{"location":"limitador/limitador-server/kubernetes/#benchmarking","title":"Benchmarking","text":" - In order to check that the ratelimit is working as expected, you can use any benchmarking tool, like hey
- You can use if you want a centos pod (better to create it on a different custer within the same Region):
kubectl apply -f centos-pod.yaml\n
- Connect to centos pod:
kubectl exec --stdin --tty centos -- /bin/bash\n
- And install
hey
with: [root@centos /]# curl -sf https://gobinaries.com/rakyll/hey | sh\n
- Now you can execute the benchmark using the following escenario:
Item Value Target AWS ELB DNS Name App pods 2 Limitador pods 2 Limits 1.000 rps per hostname Hey duration 1 minute Hey Traffic -c 60 -q 20 (around 1.200 rps) - Theoretically:
- It should let pass 1.000 requests, and limit 200 requests per second
- It should let pass 60 * 1.000 = 60.0000 requests, and limit 60 * 200 = 12.000 requests per minute
- Each limitador pod should handle half of the traffic (500 rps OK, and 200 rps limited)
[root@centos /]# hey -z 60s -c 60 -q 20 \"http://a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com\"\n\nSummary:\n Total: 60.0131 secs\n Slowest: 0.1028 secs\n Fastest: 0.0023 secs\n Average: 0.0075 secs\n Requests/sec: 1199.3721\n\n Total data: 106581650 bytes\n Size/request: 1480 bytes\n\nResponse time histogram:\n 0.002 [1] |\n 0.012 [70626] |\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\n 0.022 [1291] |\u25a0\n 0.032 [0] |\n 0.043 [0] |\n 0.053 [0] |\n 0.063 [0] |\n 0.073 [0] |\n 0.083 [0] |\n 0.093 [23] |\n 0.103 [37] |\n\n\nLatency distribution:\n 10% in 0.0053 secs\n 25% in 0.0063 secs\n 50% in 0.0073 secs\n 75% in 0.0085 secs\n 90% in 0.0096 secs\n 95% in 0.0102 secs\n 99% in 0.0139 secs\n\nDetails (average, fastest, slowest):\n DNS+dialup: 0.0001 secs, 0.0023 secs, 0.1028 secs\n DNS-lookup: 0.0001 secs, 0.0000 secs, 0.0711 secs\n req write: 0.0000 secs, 0.0000 secs, 0.0014 secs\n resp wait: 0.0074 secs, 0.0023 secs, 0.0303 secs\n resp read: 0.0000 secs, 0.0000 secs, 0.0049 secs\n\nStatus code distribution:\n [200] 60046 responses\n [429] 11932 responses\n
-
We can see that:
- Client could send 1192.2171rps (about 1200rps)
- 60046 requests (about 60000) were OK (HTTP 200)
- 11932 requests (about 12000) were limited (HTTP 429)
- Average latency (since the request goes out from the client to AWS ELB, k8s node, envoyproxy container, limitador+redis, kuar app container) is 10ms
-
In addition, if we do a longer test with 5 minutes traffic for example, you can check with the grafana dashboard how these requests are processed by envoyproxy sidecar container of kuard pods and limitador pods:
- Kuard Envoyproxy Sidecar Metrics:
- Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
- Each envoyproxy sidecar of each kuard pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is not 100% perfect, caused by random iptables forwarding when using a k8s service
- Limitador Metrics:
- Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
- Each limitador pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is perfect thanks to using a headless service with GRPC connections
"},{"location":"limitador/limitador-server/sandbox/","title":"Sandbox","text":""},{"location":"limitador/limitador-server/sandbox/#testing-environment","title":"Testing Environment","text":""},{"location":"limitador/limitador-server/sandbox/#requirements","title":"Requirements","text":" - docker v24+
"},{"location":"limitador/limitador-server/sandbox/#setup","title":"Setup","text":"Clone the project
git clone https://github.com/Kuadrant/limitador.git\ncd limitador/limitador-server/sandbox\n
Check out make help
for all the targets.
"},{"location":"limitador/limitador-server/sandbox/#deployment-options","title":"Deployment options","text":"Limitador's configuration Command Info In-memory configuration make deploy-in-memory
Counters are held in Limitador (ephemeral) Redis make deploy-redis
Uses Redis to store counters Redis Secured make deploy-redis-tls
Uses Redis with TLS and password protected to store counters Redis Cached make deploy-redis-cached
Uses Redis to store counters, with an in-memory cache Redis Otel Instrumented make deploy-redis-otel
Uses redis to store counters, instrumented with opentelemetry Disk make deploy-disk
Uses disk to store counters"},{"location":"limitador/limitador-server/sandbox/#limitadors-admin-http-endpoint","title":"Limitador's admin HTTP endpoint","text":"Limits
curl -i http://127.0.0.1:18080/limits/test_namespace\n
Counters
curl -i http://127.0.0.1:18080/counters/test_namespace\n
Metrics
curl -i http://127.0.0.1:18080/metrics\n
"},{"location":"limitador/limitador-server/sandbox/#limitadors-grpc-ratelimitservice-endpoint","title":"Limitador's GRPC RateLimitService endpoint","text":"Get grpcurl
. You need Go SDK installed.
Golang version >= 1.18 (from fullstorydev/grpcurl)
make grpcurl\n
Inspect RateLimitService
GRPC service
bin/grpcurl -plaintext 127.0.0.1:18081 describe envoy.service.ratelimit.v3.RateLimitService\n
Make a custom request
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
Do repeated requests. As the limit is set to max 5 request for 60 seconds, you should see OVER_LIMIT
response after 5 requests.
while :; do bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM; sleep 1; done\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
"},{"location":"limitador/limitador-server/sandbox/#downstream-traffic","title":"Downstream traffic","text":"Upstream service implemented by httpbin.org
curl -i -H \"Host: example.com\" http://127.0.0.1:18000/get\n
"},{"location":"limitador/limitador-server/sandbox/#limitador-image","title":"Limitador Image","text":"By default, the sandbox will run Limitador's limitador-testing:latest
image.
Building limitador-testing:latest
image
You can build Limitador's image from the current workspace code base with:
make build\n
The image will be tagged with limitador-testing:latest
Using a custom Limitador image
The LIMITADOR_IMAGE
environment variable overrides the default image. For example:
make deploy-in-memory LIMITADOR_IMAGE=quay.io/kuadrant/limitador:latest\n
"},{"location":"limitador/limitador-server/sandbox/#clean-env","title":"Clean env","text":"make clean\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/","title":"Limitador instrumentation sandbox","text":"Limitador is configured to push traces to an opentelemetry collector.
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-sandbox","title":"Run sandbox","text":"make build\nmake deploy-redis-otel\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-some-traffic","title":"Run some traffic","text":"make grpcurl\n
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n \"domain\": \"test_namespace\",\n \"hits_addend\": 1,\n \"descriptors\": [\n {\n \"entries\": [\n {\n \"key\": \"req.method\",\n \"value\": \"POST\"\n }\n ]\n }\n ]\n}\nEOM\n
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#see-the-trace-in-ui","title":"See the trace in UI","text":"firefox -private-window \"localhost:16686\"\n
It is recommended to start by looking at the check_and_update operation.
"},{"location":"limitador/limitador-server/sandbox/redis-otel/#tear-down-sandbox","title":"Tear down sandbox","text":"make clean\n
"},{"location":"limitador/limitador-server/sandbox/redis-tls/","title":"Index","text":""},{"location":"limitador/limitador-server/sandbox/redis-tls/#testing-redis-security","title":"Testing redis security","text":"Execute bash shell in redis pod
docker compose -p sandbox exec redis /bin/bash\n
Connect to this Redis server with redis-cli:
root@e024a29b74ba:/data# redis-cli --tls --cacert /usr/local/etc/redis/certs/ca.crt -a foobared\n
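Once connected, a quick PING can confirm that the TLS connection and password authentication are working (an illustrative session):
127.0.0.1:6379> PING\nPONG\n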
"},{"location":"limitador-operator/","title":"Limitador Operator","text":""},{"location":"limitador-operator/#overview","title":"Overview","text":"The Operator to manage Limitador deployments.
"},{"location":"limitador-operator/#customresourcedefinitions","title":"CustomResourceDefinitions","text":" - Limitador, which defines a desired Limitador deployment.
"},{"location":"limitador-operator/#limitador-crd","title":"Limitador CRD","text":"Limitador v1alpha1 API reference
Example:
---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n
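To try the example, apply it to a cluster where the operator is running; the operator reconciles the CR into a running Limitador instance (a minimal sketch, assuming the CR above is saved as limitador.yaml):
kubectl apply -f limitador.yaml\nkubectl get limitador limitador-sample\nkubectl get deployments # the operator creates a deployment for the Limitador instance\n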
"},{"location":"limitador-operator/#features","title":"Features","text":" - Storage Options
- Rate Limit Headers
- Logging
- Tracing
"},{"location":"limitador-operator/#contributing","title":"Contributing","text":"The Development guide describes how to build the operator and how to test your changes before submitting a patch or opening a PR.
Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.
"},{"location":"limitador-operator/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"limitador-operator/doc/development/","title":"Development Guide","text":""},{"location":"limitador-operator/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - operator-sdk version 1.32.0
- kind version v0.22.0
- git
- go version 1.21+
- kubernetes version v1.25+
- kubectl version v1.25+
"},{"location":"limitador-operator/doc/development/#build","title":"Build","text":"make\n
"},{"location":"limitador-operator/doc/development/#run-locally","title":"Run locally","text":"You need an active session open to a kubernetes cluster.
Optionally, run kind with local-env-setup
.
make local-env-setup\n
Then, run the operator locally
make run\n
"},{"location":"limitador-operator/doc/development/#deploy-the-operator-in-a-deployment-object","title":"Deploy the operator in a deployment object","text":"make local-setup\n
"},{"location":"limitador-operator/doc/development/#deploy-the-operator-using-olm","title":"Deploy the operator using OLM","text":"You can deploy the operator using OLM just running a few commands. No need to build any image. Kuadrant engineering team provides latest
and released version tagged images. They are available in the Quay.io/Kuadrant image repository.
Create kind cluster
make kind-create-cluster\n
Deploy OLM system
make install-olm\n
Deploy the operator using OLM. The make deploy-catalog
target accepts the following variables:
| Makefile Variable | Description | Default value |
| --- | --- | --- |
| CATALOG_IMG | Catalog image URL | quay.io/kuadrant/limitador-operator-catalog:latest |
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
"},{"location":"limitador-operator/doc/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"If you want to deploy (using OLM) a custom limitador operator, you need to build your own catalog.
"},{"location":"limitador-operator/doc/development/#build-operator-bundle-image","title":"Build operator bundle image","text":"The make bundle
target accepts the following variables:
| Makefile Variable | Description | Default value | Notes |
| --- | --- | --- | --- |
| IMG | Operator image URL | quay.io/kuadrant/limitador-operator:latest | |
| VERSION | Bundle version | 0.0.0 | |
| RELATED_IMAGE_LIMITADOR | Limitador bundle URL | quay.io/kuadrant/limitador:latest | The LIMITADOR_VERSION variable can be used to build this URL by providing the tag |
- Build the bundle manifests
make bundle [IMG=quay.io/kuadrant/limitador-operator:latest] \\\n [VERSION=0.0.0] \\\n [RELATED_IMAGE_LIMITADOR=quay.io/kuadrant/limitador:latest]\n
- Build the bundle image from the manifests
| Makefile Variable | Description | Default value |
| --- | --- | --- |
| BUNDLE_IMG | Operator bundle image URL | quay.io/kuadrant/limitador-operator-bundle:latest |
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
- Push the bundle image to a registry
| Makefile Variable | Description | Default value |
| --- | --- | --- |
| BUNDLE_IMG | Operator bundle image URL | quay.io/kuadrant/limitador-operator-bundle:latest |
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
"},{"location":"limitador-operator/doc/development/#build-custom-catalog","title":"Build custom catalog","text":"The catalog format will be File-based Catalog.
Make sure all the required bundles are pushed to the registry; this is required by the opm
tool.
The make catalog
target accepts the following variables:
| Makefile Variable | Description | Default value |
| --- | --- | --- |
| BUNDLE_IMG | Operator bundle image URL | quay.io/kuadrant/limitador-operator-bundle:latest |
| REPLACES_VERSION | Previous operator version | 0.0.0-alpha |
| CHANNELS | Catalog channels | preview |
make catalog [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n [REPLACES_VERSION=0.0.0-alpha] \\\n [CHANNELS=preview]\n
- Build the catalog image from the manifests
| Makefile Variable | Description | Default value |
| --- | --- | --- |
| CATALOG_IMG | Operator catalog image URL | quay.io/kuadrant/limitador-operator-catalog:latest |
make catalog-build [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
- Push the catalog image to a registry
make catalog-push [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
You can try out your custom catalog image following the steps of the Deploy the operator using OLM section.
"},{"location":"limitador-operator/doc/development/#cleaning-up","title":"Cleaning up","text":"make local-cleanup\n
"},{"location":"limitador-operator/doc/development/#run-tests","title":"Run tests","text":""},{"location":"limitador-operator/doc/development/#unittests","title":"Unittests","text":"make test-unit\n
Optionally, add TEST_NAME
makefile variable to run a specific test
make test-unit TEST_NAME=TestConstants\n
or even a subtest
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
"},{"location":"limitador-operator/doc/development/#integration-tests","title":"Integration tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run local cluster with kind
make local-env-setup\n
Run integration tests
make test-integration\n
"},{"location":"limitador-operator/doc/development/#all-tests","title":"All tests","text":"You need an active session open to a kubernetes cluster.
Optionally, run local cluster with kind
make local-env-setup\n
Run all tests
make test\n
"},{"location":"limitador-operator/doc/development/#lint-tests","title":"Lint tests","text":"make run-lint\n
"},{"location":"limitador-operator/doc/development/#uninstall-limitador-crd","title":"(Un)Install Limitador CRD","text":"You need an active session open to a kubernetes cluster.
Remove CRDs
make uninstall\n
"},{"location":"limitador-operator/doc/logging/","title":"Logging","text":"The limitador operator outputs 3 levels of log messages: (from lowest to highest level)
debug
info
(default) error
info
logging is restricted to high-level information. Actions like creating, deleting or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.
Only debug
logging will include processing details.
To configure the desired log level, set the environment variable LOG_LEVEL
to one of the supported values listed above. Default log level is info
.
Apart from log level, the controller can output messages to the logs in 2 different formats:
- production (default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
- development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}
To configure the desired log mode, set the environment variable LOG_MODE
to one of the supported values listed above. Default log mode is production
.
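As a sketch of switching both settings on a running operator (the deployment name and namespace here are assumptions that may differ in your installation):
kubectl set env deployment/limitador-operator-controller-manager -n limitador-operator-system LOG_LEVEL=debug LOG_MODE=development\n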
"},{"location":"limitador-operator/doc/rate-limit-headers/","title":"Rate Limit Headers","text":"It enables RateLimit Header Fields for HTTP as specified in Rate Limit Headers Draft
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n rateLimitHeaders: DRAFT_VERSION_03\n
Current valid values are:
- DRAFT_VERSION_03 (ref: Rate Limit Headers Draft)
- NONE
By default, when spec.rateLimitHeaders
is null, --rate-limit-headers
command-line argument is not included in Limitador's deployment.
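With DRAFT_VERSION_03 enabled, rate-limited endpoints should return draft-03 style headers; an illustrative response (the host and values are placeholders):
curl -i http://<gateway-host>/toys\n# HTTP/1.1 200 OK\n# RateLimit-Limit: 2\n# RateLimit-Remaining: 1\n# RateLimit-Reset: 30\n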
"},{"location":"limitador-operator/doc/resource-requirements/","title":"Resource Requirements","text":"The default resource requirement for Limitador deployments is specified in Limitador v1alpha1 API reference and will be applied if the resource requirement is not set in the spec.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: [] \n
| Field | json/yaml field | Type | Required | Default value | Description |
| --- | --- | --- | --- | --- | --- |
| ResourceRequirements | resourceRequirements | *corev1.ResourceRequirements | No | {\"limits\": {\"cpu\": \"500m\",\"memory\": \"64Mi\"},\"requests\": {\"cpu\": \"250m\",\"memory\": \"32Mi\"}} | Limitador deployment resource requirements |
"},{"location":"limitador-operator/doc/resource-requirements/#example-with-resource-limits","title":"Example with resource limits","text":"The resource requests and limits for the deployment can be set like the following:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n resourceRequirements:\n limits:\n cpu: 200m\n memory: 400Mi\n requests:\n cpu: 101m \n memory: 201Mi \n
To specify the deployment without resource requests or limits, set an empty struct {}
to the field:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [ \"get_toy == 'yes'\" ]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n resourceRequirements: {}\n
"},{"location":"limitador-operator/doc/storage/","title":"Storage","text":"Limitador limits counters are stored in a backend storage. This is In contrast to the storage of the limits themselves, which are always stored in ephemeral memory. Limitador's operator supports several storage configurations:
- In-Memory: ephemeral and cannot be shared
- Redis: Persistent (depending on the redis storage configuration) and can be shared
- Redis Cached: Persistent (depending on the redis storage configuration) and can be shared
- Disk: Persistent (depending on the underlying disk persistence capabilities) and cannot be shared
"},{"location":"limitador-operator/doc/storage/#in-memory","title":"In-Memory","text":"Counters are held in Limitador (ephemeral)
In-Memory is the default option defined by the Limitador Operator.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage: null\n
For any of the Redis-based options below, the URL of the Redis service should be stored inside a K8s opaque Secret.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
"},{"location":"limitador-operator/doc/storage/#redis","title":"Redis","text":"Uses Redis to store counters.
Selected when spec.storage.redis
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n
The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador
CR.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
Note: Limitador's Operator will only read the URL
field of the secret.
"},{"location":"limitador-operator/doc/storage/#redis-cached","title":"Redis Cached","text":"Uses Redis to store counters, with an in-memory cache.
Selected when spec.storage.redis-cached
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis-cached:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n
The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador
CR.
apiVersion: v1\nkind: Secret\nmetadata:\n name: redisconfig\nstringData:\n URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
Note: Limitador's Operator will only read the URL
field of the secret.
Additionally, caching options can be specified in the spec.storage.redis-cached.options
field.
"},{"location":"limitador-operator/doc/storage/#options","title":"Options","text":"Option Description batch-size
Size of entries to flush in as single flush [default: 100] flush-period
Flushing period for counters in milliseconds [default: 1000] max-cached
Maximum amount of counters cached [default: 10000] response-timeout
Timeout for Redis commands in milliseconds [default: 350] For example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n redis-cached:\n configSecretRef: # The secret reference storing the URL for Redis\n name: redisconfig\n options: # Every option is optional\n batch-size: 50\n max-cached: 5000\n
"},{"location":"limitador-operator/doc/storage/#disk","title":"Disk","text":"Counters are held on disk (persistent). Kubernetes Persistent Volumes will be used to store counters.
Selected when spec.storage.disk
is not null
.
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk: {}\n
Additionally, disk options can be specified in the spec.storage.disk.persistentVolumeClaim
and spec.storage.disk.optimize
fields.
"},{"location":"limitador-operator/doc/storage/#persistent-volume-claim-options","title":"Persistent Volume Claim Options","text":"spec.storage.disk.persistentVolumeClaim
field is an object with the following fields.
| Field | Description |
| --- | --- |
| storageClassName | StorageClass of the storage offered by cluster administrators [default: default storage class of the cluster] |
| resources | The minimum resources the volume should have. Resources will not take any effect when VolumeName is provided. This parameter is not updateable when the underlying PV is not resizable. [default: 1Gi] |
| volumeName | The binding reference to the existing PersistentVolume backing this claim [default: null] |
Example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk:\n persistentVolumeClaim:\n storageClassName: \"customClass\"\n resources:\n requests: 2Gi\n
"},{"location":"limitador-operator/doc/storage/#optimize","title":"Optimize","text":"Defines the valid optimization option of the disk persistence type.
spec.storage.disk.optimize
field is a string
type with the following valid values:
| Option | Description |
| --- | --- |
| throughput (default) | Optimizes for higher throughput |
| disk | Optimizes for disk usage |
Example:
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n storage:\n disk:\n optimize: disk\n
"},{"location":"limitador-operator/doc/tracing/","title":"Tracing","text":"Limitador offers distributed tracing enablement using the .spec.tracing
CR configuration:
---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n name: limitador-sample\nspec:\n listener:\n http:\n port: 8080\n grpc:\n port: 8081\n limits:\n\n - conditions: [\"get_toy == 'yes'\"]\n max_value: 2\n namespace: toystore-app\n seconds: 30\n variables: []\n verbosity: 3\n tracing:\n endpoint: rpc://my-otlp-collector:4317\n
Currently, Limitador only supports collectors using the OpenTelemetry Protocol with TLS disabled. The endpoint
configuration option should contain the scheme, host and port of the service. The quantity and level of information provided by the spans is configured via the verbosity
argument.
"},{"location":"multicluster-gateway-controller/","title":"multicluster-gateway-controller","text":""},{"location":"multicluster-gateway-controller/#description","title":"Description:","text":"The multi-cluster gateway controller, leverages the gateway API standard and Open Cluster Management to provide multi-cluster connectivity and global load balancing
Key Features:
- Central Gateway Definition that can then be distributed to multiple clusters
- Automatic TLS and cert distribution for HTTPS based listeners
- DNSPolicy to decide how North-South based traffic should be balanced and reach the gateways
- Health checks to detect and take remedial action against unhealthy endpoints
- Cloud DNS provider integrations (AWS Route 53), with new ones being added (Google DNS)
When deploying the multicluster gateway controller using the make targets, the following will be created:
- Kind cluster(s)
- Gateway API CRDs in the control plane cluster
- Ingress controller
- Cert manager
- LetsEncrypt certs
"},{"location":"multicluster-gateway-controller/#prerequisites","title":"Prerequisites:","text":" - AWS or GCP
- Various dependencies installed into $(pwd)/bin e.g. kind, yq etc.
- Run
make dependencies
- openssl>=3
- On macOS a later version is available with
brew install openssl
. You'll need to update your PATH as macOS provides an older version via libressl as well - On Fedora use
dnf install openssl
- go >= 1.21
"},{"location":"multicluster-gateway-controller/#1-running-the-controller-in-the-cluster","title":"1. Running the controller in the cluster:","text":" -
Set up your DNS Provider by following these steps
-
Setup your local environment
make local-setup MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER>\n
-
Build the controller image and load it into the control plane
kubectl config use-context kind-mgc-control-plane\nmake kind-load-gateway-controller\n
-
Deploy the controller(s) to the control plane cluster
make deploy-gateway-controller\n
-
(Optional) View the logs of the deployed controller
kubectl logs -f $(kubectl get pods -n multi-cluster-gateways | grep \"mgc-\" | awk '{print $1}') -n multi-cluster-gateways\n
"},{"location":"multicluster-gateway-controller/#2-running-the-controller-locally","title":"2. Running the controller locally:","text":" -
Set up your DNS Provider by following these steps
-
Setup your local environment
make local-setup MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER>\n
-
Run the controller locally:
kubectl config use-context kind-mgc-control-plane \nmake build-gateway-controller run-gateway-controller\n
"},{"location":"multicluster-gateway-controller/#3-clean-up-local-environment","title":"3. Clean up local environment","text":"In any terminal window target control plane cluster by:
kubectl config use-context kind-mgc-control-plane \n
If you want to wipe everything clean consider using: make local-cleanup # Remove kind clusters created locally and cleanup any generated local files.\n
If the intention is to cleanup kind cluster and prepare them for re-installation consider using: make local-cleanup-mgc MGC_WORKLOAD_CLUSTERS_COUNT=<NUMBER_WORKLOAD_CLUSTER> # prepares clusters for make local-setup-mgc\n
"},{"location":"multicluster-gateway-controller/#license","title":"License","text":"Copyright 2022 Red Hat.
Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0\n
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/","title":"Debugging in VS code","text":""},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#introduction","title":"Introduction","text":"The following document will show how to setup debugging for multi gateway controller.
There is an included VSCode launch.json
.
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#starting-the-controller","title":"Starting the controller","text":"Instead of starting the Gateway Controller via something like:
make build-{policy | gateway}-controller install run-{policy | gateway}-controller\n
You can now simply hit F5
in VSCode. The controller will launch with the following config:
{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug\",\n \"type\": \"go\",\n \"request\": \"launch\",\n \"mode\": \"auto\",\n \"program\": \"./cmd/controller/main.go\",\n \"args\": [\n \"--metrics-bind-address=:8080\",\n \"--health-probe-bind-address=:8081\"\n ]\n }\n ]\n}\n
"},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#running-debugger","title":"Running Debugger","text":""},{"location":"multicluster-gateway-controller/docs/contribution/vscode-debugging/#debugging-tests","title":"Debugging Tests","text":""},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/","title":"Distributing Gateways with OCM","text":""},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#define-and-place-gateways","title":"Define and Place Gateways","text":"In this guide, we will go through defining a Gateway in the OCM hub cluster that can then be distributed to and instantiated on a set of managed spoke clusters.
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#prerequisites","title":"Prerequisites","text":" - Complete the Getting Started Guide to bring up a suitable environment.
If you are looking to change provider from the default Istio:
- Please have the Gateway provider of your choice installed and configured (in this example we use Envoy gateway. See the following docs)
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#initial-setup","title":"Initial setup","text":"export MGC_SUB_DOMAIN
in each terminal if you haven't already added it to your .zshrc
or .bash_profile
.
Going through the quick start above will ensure that a supported GatewayClass
is registered in the hub cluster that the Kuadrant multi-cluster gateway controller will handle.
NOTE The quick start script will create a placement resource as part of the setup. You can use this as further inspiration for other placement resources you would like to create.
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#defining-a-gateway","title":"Defining a Gateway","text":"Once you have the Kuadrant multi-cluster gateway controller installed into the OCM hub cluster, you can begin defining and placing Gateways across your OCM managed infrastructure.
To define a Gateway and have it managed by the multi-cluster gateway controller, we need to do the following things
- Create a Gateway API Gateway resource in the Hub cluster, ensuring the gateway resource specifies the correct gateway class allowing it to be picked up and managed by the multi-cluster gateway controller
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: $MGC_SUB_DOMAIN\n port: 443\n protocol: HTTP\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#placing-a-gateway","title":"Placing a Gateway","text":"To place a gateway, we will need to create a Placement resource.
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: cluster.open-cluster-management.io/v1beta1\nkind: Placement\nmetadata:\n name: http-gateway-placement\n namespace: multi-cluster-gateways\nspec:\n clusterSets:\n\n - gateway-clusters # defines which ManagedClusterSet to use. \n numberOfClusters: 2 # defines how many clusters to select from the chosen clusterSets\nEOF\n
For more information on ManagedClusterSets and placements please see the OCM official docs: -
ManagedClusterSets
-
Placements
Finally in order to have the Gateway instances deployed to your spoke clusters that can start receiving traffic, you need to place the gateway.
-
To place the gateway, we need to add a placement label to gateway resource to instruct the gateway controller where we want this gateway instantiated.
kubectl --context kind-mgc-control-plane label gateway prod-web \"cluster.open-cluster-management.io/placement\"=\"http-gateway-placement\" -n multi-cluster-gateways\n
-
To have the gateway deployed to 2 clusters, you can add a second cluster to the clusterset by running the following:
kubectl --context kind-mgc-control-plane label managedcluster kind-mgc-workload-1 ingress-cluster=true\n
As the placement specifies numberOfClusters
as 2 your gateway will automatically be instantiated on the second cluster.
-
To find the configured gateway and the instantiated gateway on the hub cluster, run the following:
kubectl --context kind-mgc-control-plane get gateway -A\n
You'll see the following:
kuadrant-multi-cluster-gateways prod-web istio 172.31.200.0 29s\nmulti-cluster-gateways prod-web kuadrant-multi-cluster-gateway-instance-per-cluster True 2m42s\n
-
Execute the following to see the gateway on the workload-1 cluster:
kubectl --context kind-mgc-workload-1 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.201.0 90s\n
While we recommend using Istio as the gateway provider, as that is how you will get access to the full suite of policy APIs, it is possible to use another provider if you choose to. However, this will result in a reduced set of applicable policy objects.
If you are only using the DNSPolicy and TLSPolicy resources, you can use these APIs with any Gateway provider. To change the underlying provider, you need to set the gatewayclass param downstreamClass
.
-
Create the following configmap. Note: In this example, 'eg' stands for the Envoy gateway, which is mentioned in the prerequisites above:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: v1\ndata:\n params: |\n {\n \"downstreamClass\": \"eg\"\n }\nkind: ConfigMap\nmetadata:\n name: gateway-params\n namespace: multi-cluster-gateways\nEOF\n
-
Update the gatewayclass to include the above Configmap
kubectl --context kind-mgc-control-plane patch gatewayclass kuadrant-multi-cluster-gateway-instance-per-cluster -n multi-cluster-gateways --type merge --patch '{\"spec\":{\"parametersRef\":{\"group\":\"\",\"kind\":\"ConfigMap\",\"name\":\"gateway-params\",\"namespace\":\"multi-cluster-gateways\"}}}'\n
Once this has been created, any gateways created from that gateway class will result in a downstream gateway being provisioned with the configured downstreamClass. Run the following in both your hub and spoke cluster to see the gateways:
kubectl --context kind-mgc-control-plane get gateway -A\n
kubectl --context kind-mgc-workload-1 get gateway -A\n
"},{"location":"multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/#using-a-different-gateway-provider","title":"Using a different gateway provider?","text":""},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/","title":"Gateway Deletion","text":""},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#gateway-deletion","title":"Gateway deletion","text":"When deleting a gateway it should ONLY be deleted in the control plane cluster. This will the trigger the following events:
"},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#workload-clusters","title":"Workload cluster(s):","text":" - The corresponding gateway in the workload clusters will also be deleted.
"},{"location":"multicluster-gateway-controller/docs/gateways/gateway-deletion/#control-plane-clusters","title":"Control plane cluster(s):","text":" -
DNS Record deletion:
Gateways and DNS records have a 1:1 relationship only, when a gateway gets deleted the corresponding DNS record also gets marked for deletion. This then triggers the DNS record to be removed from the managed zone in the DNS provider (currently only route 53 is accepted).
-
Certs and secrets deletion :
When a gateway is created a cert is also created for the host in the gateway, this is also removed when the gateway is deleted.
"},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/","title":"API Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/#introduction","title":"Introduction","text":"This document will detail the setup of a reference architecture to support a number of API management use-cases connecting Kuadrant with other projects the wider API management on Kubernetes ecosystem.
"},{"location":"multicluster-gateway-controller/docs/how-to/api-walkthrough/#petstore-app-deployment","title":"Petstore App Deployment","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/","title":"Metrics walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#introduction","title":"Introduction","text":"This walkthrough shows how to install a metrics federation stack locally and query Istio metrics from the hub.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#video-walkthrough","title":"Video Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#installation-and-configuration-of-metrics","title":"Installation and Configuration of Metrics","text":"This document will guide you in installing metrics for your application and provide directions on where to access them. Additionally, it will include dashboards set up to display these metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#requirementsprerequisites","title":"Requirements/prerequisites","text":"Prior to commencing the metrics installation process, it is imperative that you have successfully completed the initial getting started guide. For reference, please consult the guide available at the following link: Getting Started Guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#setting-up-metrics","title":"Setting Up Metrics","text":"To establish metrics, simply execute the following script in your terminal:
curl https://raw.githubusercontent.com/kuadrant/multicluster-gateway-controller/main/hack/quickstart-metrics.sh | bash\n
This script will initiate the setup process for your metrics configuration. After the script finishes running, you should see something like:
Connect to Thanos Query UI\n URL: https://thanos-query.172.31.0.2.nip.io\n\nConnect to Grafana UI\n URL: https://grafana.172.31.0.2.nip.io\n
You can visit the Grafana dashboard by accessing the provided URL for Grafana UI. (you may need to scroll)
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#monitoring-operational-status-in-grafana-dashboard","title":"Monitoring Operational Status in Grafana Dashboard","text":"After setting up metrics, you can monitor the operational status of your system using the Grafana dashboard.
To generate traffic to the application, use curl
as follows:
while true; do curl -k https://$MGC_SUB_DOMAIN && sleep 5; done\n
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#accessing-the-thanos-ui","title":"Accessing the Thanos UI","text":" - Access the Thanos UI by clicking or entering the provided URL for the Grafana UI in your web browser.
https://thanos-query.172.31.0.2.nip.io\n
- In the Thanos UI query box, enter the below query and press 'Execute'
sum(rate(container_cpu_usage_seconds_total{namespace=\"monitoring\",container=\"prometheus\"}[5m]))\n
You should see a response in the table view. In the Graph view you should see some data over time as well.
sum(rate(istio_requests_total{}[5m])) by(destination_workload)\n
In the graph view you should see something that looks like the graph below. This shows the rate of requests (per second) for each Isito workload. In this case, there is 1 workload, balanced across 2 clusters.
To see the rate of requests per cluster (actually per pod across all clusters), the below query can be used. Over long periods of time, this graph can show traffic load balancing between application instances.
sum(rate(istio_requests_total{}[5m])) by(pod)\n
"},{"location":"multicluster-gateway-controller/docs/how-to/metrics-walkthrough/#accessing-the-grafana-dashboard","title":"Accessing the Grafana Dashboard","text":"To view the operational metrics and status, proceed with the following steps:
- Access the Grafana dashboard by clicking or entering the provided URL for the Grafana UI in your web browser.
https://grafana.172.31.0.2.nip.io\n
Note: The default login credentials for Grafana are admin/admin. You may need to accept the non-CA signed certificate to proceed.
- Navigate to the included Grafana Dashboard
Using the left sidebar in the Grafana UI, navigate to Dashboards > Browse
and select the Istio Workload Dashboard
, MGC SRE Dashboard
or any of the following Gateway Api State
dashboards.
In Istio Workload Dashboard
you should be able to see the following layout, which will include data from the curl
command you ran in the previous section.
The MGC SRE Dashboard
displays real-time insights and visualizations of resources managed by the multicluster-gateway-controller e.g. DNSPolicy, TLSPolicy, DNSRecord etc..
The Gateway API State / Gateways
provides real-time insights and visualizations for Gateways. It offers information about gateway listeners, listener status, gateway status, addresses, and attached routes
The Gateway API State / GatewayClasses
provides insights into Gateways organized by their respective Gateway Classes. It offers information about GatewayClasses and the supported features for each class.
The Gateway API State / HTTPRoutes
or any of the remaining routes focuses on their Routes
and provides insights into their configuration. It displays their targeted parent references, and attached parent references, offering a detailed view of how these routes are structured and associated with their respective resources.
The Grafana dashboard will provide you with real-time insights and visualizations of your gateway's performance and metrics.
By utilizing the Grafana dashboard, you can effectively monitor the health and behavior of your system, making informed decisions based on the displayed data. This monitoring capability enables you to proactively identify and address any potential issues to ensure the smooth operation of your environment.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/","title":"Multicluster Gateways Walkthrough","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#introduction","title":"Introduction","text":"This document will walk you through using Open Cluster Management (OCM) and Kuadrant to configure and deploy a multi-cluster gateway.
You will also deploy a simple application that uses that gateway for ingress and protects that applications endpoints with a rate limit policy.
We will start with a hub cluster and 2 workload clusters and highlight the automatic TLS integration and also the automatic DNS load balancing between gateway instances.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#requirements","title":"Requirements","text":" - Complete the Getting Started - Multi Cluster Guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#initial-setup","title":"Initial Setup","text":"In this walkthrough, we'll deploy test echo services across multiple clusters. If you followed the Getting Started - Multi Cluster Guide, you would have already set up a KUADRANT_ZONE_ROOT_DOMAIN
environment variable. For this tutorial, we'll derive a host from this domain for these echo services.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#create-a-gateway","title":"Create a gateway","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#check-the-managed-zone","title":"Check the managed zone","text":" -
First let's ensure the managedzone
is present:
kubectl get managedzone -n multi-cluster-gateways --context kind-mgc-control-plane\n
You should see the following: NAME DOMAIN NAME ID RECORD COUNT NAMESERVERS READY\nmgc-dev-mz test.hcpapps.net /hostedzone/Z08224701SVEG4XHW89W0 7 [\"ns-1414.awsdns-48.org\",\"ns-1623.awsdns-10.co.uk\",\"ns-684.awsdns-21.net\",\"ns-80.awsdns-10.com\"] True\n
You are now ready to begin creating a gateway!
- We will now create a multi-cluster gateway definition in the hub cluster:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"*.$KUADRANT_ZONE_ROOT_DOMAIN\"\n port: 443\n protocol: HTTPS\n tls:\n mode: Terminate\n certificateRefs:\n - name: apps-hcpapps-tls\n kind: Secret\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#enable-tls","title":"Enable TLS","text":" -
Create a TLSPolicy and attach it to your Gateway:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n issuerRef:\n group: cert-manager.io\n kind: ClusterIssuer\n name: glbc-ca \nEOF\n
-
You should now see a Certificate resource in the hub cluster:
kubectl --context kind-mgc-control-plane get certificates -A\n
You should see the following: NAMESPACE NAME READY SECRET AGE\nmulti-cluster-gateways apps-hcpapps-tls True apps-hcpapps-tls 12m\n
It is possible to also use a letsencrypt certificate, but for simplicity in this walkthrough we are using a self-signed cert.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#place-the-gateway","title":"Place the gateway","text":"In the hub cluster there will be a single gateway definition but no actual gateway for handling traffic yet. This is because we haven't placed the gateway yet onto any of our ingress clusters.
-
To place the gateway, we need to add a placement label to gateway resource to instruct the gateway controller where we want this gateway instantiated:
kubectl --context kind-mgc-control-plane label gateway prod-web \"cluster.open-cluster-management.io/placement\"=\"http-gateway\" -n multi-cluster-gateways\n
-
On the hub cluster you should find there is a configured gateway:
kubectl --context kind-mgc-control-plane get gateway -A\n
you'll see the following: multi-cluster-gateways prod-web kuadrant-multi-cluster-gateway-instance-per-cluster True 2m42s\n
Later on we will add in another ingress cluster and in that case you will see the instantiated gateway.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#enable-dns","title":"Enable DNS","text":" -
Create a DNSPolicy and attach it to your Gateway:
kubectl --context kind-mgc-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway \nEOF\n
Once this is done, the Kuadrant multi-cluster gateway controller will pick up when a HTTPRoute has been attached to the Gateway it is managing from the hub and it will setup a DNS record to start bringing traffic to that gateway for the host defined in that listener.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#introducing-the-workload-clusters","title":"Introducing the workload clusters","text":"So now we have a working gateway with DNS and TLS configured. Let's place this gateway on the workload clusters and bring traffic to those gateways also.
-
We need to modify our placement to update our numberOfClusters
to 2. To patch, run:
kubectl --context kind-mgc-control-plane patch placement http-gateway -n multi-cluster-gateways --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/numberOfClusters\", \"value\": 2}]'\n
-
Run the following to see the gateway on the workload-1 cluster:
kubectl --context kind-mgc-workload-1 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.201.0 90s\n
-
Run the following to see the gateway on the workload-2 cluster:
kubectl --context kind-mgc-workload-2 get gateways -A\n
You'll see the following NAMESPACE NAME CLASS ADDRESS PROGRAMMED AGE\nkuadrant-multi-cluster-gateways prod-web istio 172.31.202.0 90s\n
Additionally, you should be able to see a secret containing a self-signed certificate.
-
There should also be an associated TLS secret:
kubectl --context kind-mgc-workload-1 get secrets -n kuadrant-multi-cluster-gateways\n
you'll see the following: NAME TYPE DATA AGE\napps-hcpapps-tls kubernetes.io/tls 3 13m\n
And in the second workload cluster
kubectl --context kind-mgc-workload-2 get secrets -n kuadrant-multi-cluster-gateways\n
you'll see the following: NAME TYPE DATA AGE\napps-hcpapps-tls kubernetes.io/tls 3 13m\n
The listener is configured to use this TLS secret also. So now our gateway has been placed and is running in the right locations with the right configuration and TLS has been setup for the HTTPS listeners.
So now we have workload ingress clusters configured with the same Gateway.
-
Let's create the HTTPRoute in the first workload cluster. Again, remembering to replace the hostname accordingly if you haven't already set a value for the KUADRANT_ZONE_ROOT_DOMAIN
variable as described in the Getting Started - Multi Cluster Guide:
kubectl --context kind-mgc-workload-1 apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n parentRefs:\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - \"echo.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: echo\n port: 8080\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: echo\nspec:\n ports:\n - name: http-port\n port: 8080\n targetPort: http-port\n protocol: TCP\n selector:\n app: echo \n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: echo\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: echo\n template:\n metadata:\n labels:\n app: echo\n spec:\n containers:\n - name: echo\n image: docker.io/jmalloc/echo-server\n ports:\n - name: http-port\n containerPort: 8080\n protocol: TCP \nEOF\n
-
Let's create the same HTTPRoute in the second workload cluster. Note the --context
references the second cluster
kubectl --context kind-mgc-workload-2 apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n parentRefs:\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - \"echo.$KUADRANT_ZONE_ROOT_DOMAIN\"\n rules:\n - backendRefs:\n - name: echo\n port: 8080\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: echo\nspec:\n ports:\n - name: http-port\n port: 8080\n targetPort: http-port\n protocol: TCP\n selector:\n app: echo \n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: echo\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: echo\n template:\n metadata:\n labels:\n app: echo\n spec:\n containers:\n - name: echo\n image: docker.io/jmalloc/echo-server\n ports:\n - name: http-port\n containerPort: 8080\n protocol: TCP \nEOF\n
-
If we take a look at the dnsrecord, you will see we now have two A records configured:
kubectl --context kind-mgc-control-plane get dnsrecord -n multi-cluster-gateways -o=yaml\n
-
Give DNS a minute or two to update. You should then be able to execute the following and get back the correct A record.
dig echo.$KUADRANT_ZONE_ROOT_DOMAIN\n
-
You should also be able to curl that endpoint
curl -k https://echo.$KUADRANT_ZONE_ROOT_DOMAIN\n\n# Request served by echo-XXX-XXX\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#watching-dns-changes","title":"Watching DNS changes","text":"If you want you can use watch dig echo.$KUADRANT_ZONE_ROOT_DOMAIN
to see the DNS switching between the two addresses
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/#follow-on-walkthroughs","title":"Follow-on Walkthroughs","text":"Here are some good, follow-on guides that build on this walkthrough:
- Simple RateLimitPolicy for App Developers
- Deploying/Configuring Metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/","title":"Multicluster LoadBalanced DNSPolicy","text":""},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#terms","title":"Terms","text":" GatewayAPI
: resources that model service networking in Kubernetes. Gateway
: Kubernetes Gateway resource. ManagedZone
: Kuadrant resource representing a Zone Apex in a dns provider. DNSPolicy
: Kuadrant policy for managing gateway dns. DNSRecord
: Kuadrant resource representing a set of records in a managed zone.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dns-provider-setup","title":"DNS Provider Setup","text":"A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about dns providers, and what domains these dns providers are currently hosting. This is done through the creation of ManagedZones and dns provider secrets containing the credentials for the dns provider account.
If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net
:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: echo.apps.hcpapps.net\n port: 80\n protocol: HTTP\n
In order for the DNSPolicy to act upon that listener, a ManagedZone must exist for that hostnames domain.
A secret containing the provider credentials must first be created:
kubectl create secret generic my-aws-credentials --type=kuadrant.io/aws --from-env-file=./aws-credentials.env -n multi-cluster-gateways\nkubectl get secrets my-aws-credentials -n multi-cluster-gateways -o yaml\napiVersion: v1\ndata:\n AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n AWS_REGION: <AWS_REGION>\n AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\nkind: Secret\nmetadata:\n name: my-aws-credentials\n namespace: multi-cluster-gateways\ntype: kuadrant.io/aws\n
And then a ManagedZone can be added for the desired domain referencing the provider credentials:
apiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: apps.hcpapps.net\n namespace: multi-cluster-gateways\nspec:\n domainName: apps.hcpapps.net\n description: \"apps.hcpapps.net managed domain\"\n dnsProviderSecretRef:\n name: my-aws-credentials\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dnspolicy-creation-and-attachment","title":"DNSPolicy creation and attachment","text":"Once an appropriate ManagedZone is configured for a Gateways listener hostname, we can now create and attach a DNSPolicy to start managing dns for it.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n healthCheck:\n allowInsecureCertificates: true\n additionalHeadersRef:\n name: probe-headers\n endpoint: /\n expectedResponses:\n\n - 200\n - 201\n - 301\n failureThreshold: 5\n port: 80\n protocol: http\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#target-reference","title":"Target Reference","text":"targetRef
field is taken from policy attachment's target reference API. It can only target one resource at a time. Fields included inside:
Group
is the group of the target resource. Only valid option is gateway.networking.k8s.io
. Kind
is kind of the target resource. Only valid options are Gateway
. Name
is the name of the target resource. Namespace
is the namespace of the referent. Currently only local objects can be referred so value is ignored.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#health-check","title":"Health Check","text":"The health check section is optional, the following fields are available:
allowInsecureCertificates
: Added for development environments, allows health probes to not fail when finding an invalid (e.g. self-signed) certificate. additionalHeadersRef
: A reference to a secret which contains additional headers such as an authentication token endpoint
: The path to specify for these health checks, e.g. /healthz
expectedResponses
: Defaults to 200 or 201, this allows other responses to be considered valid failureThreshold
: How many consecutive fails are required to consider this endpoint unhealthy port
: The port to connect to protocol
: The protocol to use for this connection
For more information about DNS Health Checks, see this guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#checking-status-of-health-checks","title":"Checking status of health checks","text":"To list all health checks:
kubectl get dnshealthcheckprobes -A\n
This will list all probes in the hub cluster, and whether they are currently healthy or not. To find more information on why a specific health check is failing, look at the status of that probe:
kubectl get dnshealthcheckprobe <name> -n <namespace> -o yaml\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#dnsrecord-resources","title":"DNSRecord Resources","text":"The DNSPolicy will create a DNSRecord resource for each listener hostname with a suitable ManagedZone configured. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.
Given the following Gateway status:
status:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-1/172.31.201.1\n - type: kuadrant.io/MultiClusterIPAddress\n value: kind-mgc-workload-2/172.31.202.1\n conditions:\n - lastTransitionTime: \"2023-07-24T19:09:54Z\"\n message: Handled by kuadrant.io/mgc-gw-controller\n observedGeneration: 1\n reason: Accepted\n status: \"True\"\n type: Accepted\n - lastTransitionTime: \"2023-07-24T19:09:55Z\"\n message: 'gateway placed on clusters [kind-mgc-workload-1 kind-mgc-workload-2] '\n observedGeneration: 1\n reason: Programmed\n status: \"True\"\n type: Programmed\n listeners:\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-1.api\n supportedKinds: []\n - attachedRoutes: 1\n conditions: []\n name: kind-mgc-workload-2.api\n supportedKinds: [] \n
The example DNSPolicy shown above would create a DNSRecord like the following:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n creationTimestamp: \"2023-07-24T19:09:56Z\"\n finalizers:\n\n - kuadrant.io/dns-record\n generation: 3\n labels:\n kuadrant.io/Gateway-uid: 0877f97c-f3a6-4f30-97f4-e0d7f25cc401\n kuadrant.io/record-id: echo\n name: echo.apps.hcpapps.net\n namespace: multi-cluster-gateways\n ownerReferences:\n - apiVersion: gateway.networking.k8s.io/v1\n kind: Gateway\n name: echo-app\n uid: 0877f97c-f3a6-4f30-97f4-e0d7f25cc401\n - apiVersion: kuadrant.io/v1alpha1\n blockOwnerDeletion: true\n controller: true\n kind: ManagedZone\n name: apps.hcpapps.net\n uid: 26a06799-acff-476b-a1a3-c831fd19dcc7\n resourceVersion: \"25464\"\n uid: 365bf57f-10b4-42e8-a8e7-abb6dce93985\nspec:\n endpoints:\n - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n managedZone:\n name: apps.hcpapps.net \n
This results in corresponding records being created in AWS Route53 (the provider used in our example ManagedZone above).
The listener hostname is now resolvable through DNS:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
More information about the dns record structure can be found in the DNSRecord structure document.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#load-balancing","title":"Load Balancing","text":"Configuration of DNS Load Balancing features is done through the loadBalancing
field in the DNSPolicy spec.
loadBalancing
field contains the specification of how dns will be configured in order to provide balancing of load across multiple clusters. Fields included inside:
weighted
field describes how weighting will be applied to weighted dns records. Fields included inside: defaultWeight
arbitrary weight value that will be applied to weighted dns records by default. Integer greater than 0 and no larger than the maximum value accepted by the target dns provider. custom
array of custom weights to apply when custom attribute values match. geo
field enables the geo routing strategy. Fields included inside: defaultGeo
geo code to apply to geo dns records by default. The values accepted are determined by the target dns provider.
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#weighted","title":"Weighted","text":"A DNSPolicy with an empty loadBalancing
spec, or with a loadBalancing.weighted.defaultWeight
set and nothing else produces a set of records grouped and weighted to produce a Round Robin routing strategy where all target clusters will have an equal chance of being returned in DNS queries.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 100 # <--- New Default Weight being added\n
The weight of all records is adjusted to reflect the new defaultWeight
value of 100
. This will still produce the same Round Robin routing strategy as before since all records still have equal weight values.
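You can verify the adjusted weights by inspecting the endpoints of the DNSRecord for the listener host, for example:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n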
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#custom-weights","title":"Custom Weights","text":"In order to manipulate how much traffic individual clusters receive, custom weights can be added to the DNSPolicy.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 120\n custom: # <--- New Custom Weights being added\n\n - weight: 255\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n - weight: 10\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: GCP\n
And apply custom-weight
labels to each of our managed cluster resources:
kubectl label --overwrite managedcluster kind-mgc-workload-1 kuadrant.io/lb-attribute-custom-weight=AWS\nkubectl label --overwrite managedcluster kind-mgc-workload-2 kuadrant.io/lb-attribute-custom-weight=GCP\n
The DNSRecord for our listener host gets updated, and the weighted records are adjusted to have the new values:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"10\" # <--- Weight is updated\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"255\" # <--- Weight is updated\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n
In the above scenario the IP address of the managed cluster kind-mgc-workload-2 (GCP) will be returned far less frequently in DNS queries than that of kind-mgc-workload-1 (AWS).
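As a rough worked example: with weighted routing, each record's share of DNS responses is approximately its weight divided by the sum of all weights in the group, so with the weights above kind-mgc-workload-1 would be returned for roughly 255 of every 265 queries (about 96%), and kind-mgc-workload-2 for the remaining 4%.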
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#geo","title":"Geo","text":"To enable Geo Load balancing the loadBalancing.geo.defaultGeo
field should be added. This informs the DNSPolicy that we now want to start making use of Geo Location features in our target provider. This will change the single record set group created from default
(What is created for weighted only load balancing) to a geo specific one based on the value of defaultGeo
.
If we apply the following update to the DNSPolicy:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n weighted:\n defaultWeight: 120\n custom:\n\n - weight: 255\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: AWS\n - weight: 10\n selector:\n matchLabels:\n kuadrant.io/lb-attribute-custom-weight: GCP\n geo:\n defaultGeo: US # <--- New `geo.defaultGeo` added for `US` (United States)\n
The DNSRecord for our listener host gets updated, and the default geo is replaced with the one we specified:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net # <--- New `us` geo location CNAME is created\n providerSpecific:\n - name: geo-country-code\n value: US\n recordTTL: 300\n recordType: CNAME\n setIdentifier: US\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net # <--- Default catch all CNAME is updated to point to `us` target\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net # <--- Gateway default group is now `us`\n providerSpecific:\n - name: weight\n value: \"10\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net # <--- Gateway default group is now `us`\n providerSpecific:\n - name: weight\n value: \"255\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n
The listener hostname is still resolvable, but now routed through the us
record set:
dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\nus.lb-2903yb.echo.apps.hcpapps.net. # <--- `us` CNAME now in the chain\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#configuring-cluster-geo-locations","title":"Configuring Cluster Geo Locations","text":"The defaultGeo
as described above puts all clusters into the same geo group, but for geo to be useful we need to mark our clusters as being in different locations. We can do this though by adding geo-code
attributes on the ManagedCluster to show which county each cluster is in. The values that can be used are determined by the dns provider (See Below).
Apply geo-code
labels to each of our managed cluster resources:
kubectl label --overwrite managedcluster kind-mgc-workload-1 kuadrant.io/lb-attribute-geo-code=US\nkubectl label --overwrite managedcluster kind-mgc-workload-2 kuadrant.io/lb-attribute-geo-code=ES\n
The above indicates that kind-mgc-workload-1
is located in the US (United States), which is the same as our current default geo, and kind-mgc-workload-2
is in ES (Spain).
The DNSRecord for our listener host gets updated, and records are now divided into two groups, us and es:
kubectl get dnsrecord echo.apps.hcpapps.net -n multi-cluster-gateways -o yaml | yq .spec.endpoints\n\n- dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.202.1\n- dnsName: echo.apps.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-2903yb.echo.apps.hcpapps.net\n- dnsName: es.lb-2903yb.echo.apps.hcpapps.net # <--- kind-mgc-workload-2 target now added to `es` group\n providerSpecific:\n - name: weight\n value: \"10\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net # <--- New `es` geo location CNAME is created\n providerSpecific:\n - name: geo-country-code\n value: ES\n recordTTL: 300\n recordType: CNAME\n setIdentifier: ES\n targets:\n - es.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: US\n recordTTL: 300\n recordType: CNAME\n setIdentifier: US\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: geo-country-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - us.lb-2903yb.echo.apps.hcpapps.net\n- dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.201.1\n- dnsName: us.lb-2903yb.echo.apps.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"255\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n targets:\n - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n
In the above scenario, requests made from Spain will be returned the IP address of kind-mgc-workload-2, and requests made from anywhere else in the world will be returned the IP address of kind-mgc-workload-1. Weighting of records is still enforced between clusters in the same geo group; in the case above, however, it has no effect since there is only one cluster in each group.
If an unsupported value is given to a provider, DNS records will not be created, so choose carefully. For more information on which locations are right for your needs, please read the provider's documentation (see links below).
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#locations-supported-per-dns-provider","title":"Locations supported per DNS provider","text":"Supported AWS GCP Continents Country codes States Regions"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#continents-and-country-codes-supported-by-aws-route-53","title":"Continents and country codes supported by AWS Route 53","text":":Note: For more information please the official AWS documentation
To see all regions supported by AWS Route 53, please see the official [documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html)
"},{"location":"multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/#regions-supported-by-gcp-cloud-dns","title":"Regions supported by GCP CLoud DNS","text":"To see all regions supported by GCP Cloud DNS, please see the official (documentation)[https://cloud.google.com/compute/docs/regions-zones]
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/","title":"Simple Rate Limiting for Application Developers","text":"This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#requirements","title":"Requirements","text":" - Complete the Multicluster Gateways Walkthrough where you'll have an environment configured with a Gateway that we'll use in this guide.
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#overview","title":"Overview","text":"In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.$KUADRANT_ZONE_ROOT_DOMAIN
, where it exposes the endpoints GET /toys*
and POST /toys
, respectively, to mimic operations of reading and writing toy records.
We will rate limit the POST /toys
endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#1-deploy-the-toy-store-api","title":"\u2460 Deploy the Toy Store API","text":""},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#create-the-deployment","title":"Create the Deployment","text":"Note: You can skip this step and proceed to Create the HTTPRoute if you've already deployed the Toy Store API as part of the AuthPolicy for Application Developers and Platform Engineers guide.
Create the deployments for both clusters we've created previously (kind-mgc-workload-1
& kind-mgc-workload-2
).
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: toystore\n labels:\n app: toystore\nspec:\n selector:\n matchLabels:\n app: toystore\n template:\n metadata:\n labels:\n app: toystore\n spec:\n containers:\n\n - name: toystore\n image: quay.io/3scale/authorino:echo-api\n env:\n - name: PORT\n value: \"3000\"\n ports:\n - containerPort: 3000\n name: http\n replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: toystore\nspec:\n selector:\n app: toystore\n ports:\n - name: http\n port: 80\n protocol: TCP\n targetPort: 3000\nEOF\ndone\n
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#create-the-httproute","title":"Create the HTTPRoute","text":"Create a HTTPRoute to route traffic to the services via the Gateways:
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: toystore\nspec:\n parentRefs:\n\n - kind: Gateway\n name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n hostnames:\n - toystore.$KUADRANT_ZONE_ROOT_DOMAIN\n rules:\n - matches:\n - method: GET\n path:\n type: PathPrefix\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\n - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\n backendRefs:\n - name: toystore\n port: 80\nEOF\ndone\n
Verify the routes work:
curl -ik https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys\n# HTTP/1.1 200 OK\n
Given the two clusters, and our previously created DNSPolicy
, traffic will load balance between these clusters round-robin style. Load balancing here will be determined in part by DNS TTLs, so it can take a minute or two for requests to flow to both services.
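Because responses rotate per query, you can watch the round-robin behaviour directly (an illustrative check, not part of the original guide):
while :; do dig toystore.$KUADRANT_ZONE_ROOT_DOMAIN +short; sleep 10; done\n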
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#2-enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"\u2461 Enforce rate limiting on requests to the Toy Store API","text":"Create a Kuadrant RateLimitPolicy
to configure rate limiting:
for context in kind-mgc-workload-1 kind-mgc-workload-2; do kubectl --context $context apply -f - <<EOF\napiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n \"create-toy\":\n rates:\n\n - limit: 5\n duration: 10\n unit: second\n routeSelectors:\n - matches: # selects the 2nd HTTPRouteRule of the targeted route\n - method: POST\n path:\n type: Exact\n value: \"/toys\"\nEOF\ndone\n
Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.
Verify the rate limiting works by sending requests in a loop.
Up to 5 successful (200 OK
) requests every 10 seconds to POST /toys
, then 429 Too Many Requests
:
while :; do curl --write-out '%{http_code}' --silent -k --output /dev/null https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys -X POST | egrep --color \"\\b(429)\\b|$\"; sleep 1; done\n
Unlimited successful (200 OK
) to GET /toys
:
while :; do curl --write-out '%{http_code}' --silent -k --output /dev/null https://toystore.$KUADRANT_ZONE_ROOT_DOMAIN/toys | egrep --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/#next-steps","title":"Next Steps","text":"Here are some good, follow-on guides that build on this walkthrough:
- AuthPolicy for Application Developers and Platform Engineers
- Deploying/Configuring Metrics.
"},{"location":"multicluster-gateway-controller/docs/how-to/template/","title":"Title","text":""},{"location":"multicluster-gateway-controller/docs/how-to/template/#introduction","title":"Introduction","text":"blah blah amazing and wonderful feature blah blah gateway blah blah DNS
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#requirements","title":"Requirements","text":" - A computer
- Electricity
- Kind
- AWS Account
- Route 53 enabled
- Other Walkthroughs
## Installation and Setup
- Clone this repo locally
-
Setup a ./controller-config.env file in the root of the repo with the following key values:
# this sets up your default managed zone\nAWS_DNS_PUBLIC_ZONE_ID=<AWS ZONE ID>\n# this is the domain at the root of your zone (foo.example.com)\nZONE_ROOT_DOMAIN=<replace.this>\nLOG_LEVEL=1\n
-
Setup a ./aws-credentials.env file with credentials to access Route 53.
For example:
AWS_ACCESS_KEY_ID=<access_key_id>\nAWS_SECRET_ACCESS_KEY=<secret_access_key>\nAWS_REGION=eu-west-1\n
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#open-terminal-sessions","title":"Open terminal sessions","text":"For this walkthrough, we're going to use multiple terminal sessions/windows, all using multicluster-gateway-controller
as the pwd
.
Open three windows, which we'll refer to throughout this walkthrough as:
- T1 (Hub Cluster)
- T2 (where we'll run our controller locally)
- T3 (Workloads cluster)
To set up a local instance, in T1, run:
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#known-bugs","title":"Known bugs","text":"buzzzzz
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#follow-on-walkthroughs","title":"Follow on Walkthroughs","text":"Some good follow on walkthroughs that build on this walkthrough
"},{"location":"multicluster-gateway-controller/docs/how-to/template/#helpful-symbols-for-dev-use","title":"Helpful symbols (For dev use)","text":" - for more see https://gist.github.com/rxaviers/7360908
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/","title":"Setting up MGC in Existing OCM Clusters","text":"This guide will show you how to install and configure the Multi-Cluster Gateway Controller in pre-existing Open Cluster Management configured clusters.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#prerequisites","title":"Prerequisites","text":" - A hub cluster running the OCM control plane (>= v0.11.0 )
- Open cluster management addons enabled
clusteradm install hub-addon --names application-manager
- Any number of additional spoke clusters that have been configured as OCM ManagedClusters
- Kubectl (>= v1.14.0)
- Either a pre-existing cert-manager(>=v1.12.2) installation or the Kustomize and Helm CLIs installed
- Amazon Web Services (AWS) and/or Google Cloud Platform (GCP) credentials. See the DNS Provider guide for obtaining these credentials.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#configure-ocm-with-rawfeedbackjsonstring-feature-gate","title":"Configure OCM with RawFeedbackJsonString Feature Gate","text":"All OCM spoke clusters must be configured with the RawFeedbackJsonString
feature gate enabled.
Patch each spoke cluster's klusterlet
in an existing OCM install:
kubectl patch klusterlet klusterlet --type merge --patch '{\"spec\": {\"workConfiguration\": {\"featureGates\": [{\"feature\": \"RawFeedbackJsonString\", \"mode\": \"Enable\"}]}}}' --context <EACH_SPOKE_CLUSTER>\n
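You can check that the feature gate was applied by reading it back from each spoke cluster (a simple verification, not from the original guide):
kubectl get klusterlet klusterlet -o jsonpath='{.spec.workConfiguration.featureGates}' --context <EACH_SPOKE_CLUSTER>\n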
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#setup-for-hub-commands","title":"Setup for hub commands","text":"Many of the commands in this document should be run in the context of your hub cluster. By configure HUB_CLUSTER which will be used in the commands:
export HUB_CLUSTER=<HUB_CLUSTER_NAME>\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#install-cert-manager","title":"Install Cert-Manager","text":"Cert-manager first needs to be installed on your hub cluster. If this has not previously been installed on the cluster, see the documentation for installation instructions here.
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#installing-mgc","title":"Installing MGC","text":"First, run the following command in the context of your hub cluster to install the Gateway API CRDs:
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml --context $HUB_CLUSTER\n
Verify the CRDs have been established:
kubectl wait --timeout=5m crd/gatewayclasses.gateway.networking.k8s.io crd/gateways.gateway.networking.k8s.io crd/httproutes.gateway.networking.k8s.io --for=condition=Established --context $HUB_CLUSTER\n
customresourcedefinition.apiextensions.k8s.io/gatewayclasses.gateway.networking.k8s.io condition met\ncustomresourcedefinition.apiextensions.k8s.io/gateways.gateway.networking.k8s.io condition met\ncustomresourcedefinition.apiextensions.k8s.io/httproutes.gateway.networking.k8s.io condition met\n
Then run the following command to install the MGC:
kubectl apply -k \"github.com/kuadrant/multicluster-gateway-controller.git/config/mgc-install-guide?ref=release-0.2\" --context $HUB_CLUSTER\n
In addition to the MGC, this will also install the Kuadrant add-on manager and a GatewayClass
from which MGC-managed Gateways
can be instantiated.
Verify that the MGC and add-on manager have been installed and are running:
kubectl wait --timeout=5m -n multicluster-gateway-controller-system deployment/mgc-controller-manager --for=condition=Available --context $HUB_CLUSTER\n
deployment.apps/mgc-controller-manager condition met\n
Verify that the GatewayClass
has been accepted by the MGC:
kubectl wait --timeout=5m gatewayclass/kuadrant-multi-cluster-gateway-instance-per-cluster --for=condition=Accepted --context $HUB_CLUSTER\n
gatewayclass.gateway.networking.k8s.io/kuadrant-multi-cluster-gateway-instance-per-cluster condition met\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#creating-a-managedzone","title":"Creating a ManagedZone","text":"Note: To manage the creation of DNS records, MGC uses ManagedZone resources. A ManagedZone
can be configured to use DNS Zones on both AWS (Route53) and GCP (Cloud DNS). Commands to create each are provided below.
First, depending on the provider you would like to use, export the environment variables detailed here in a terminal session.
Next, create a secret containing either the AWS or GCP credentials. We'll also create a namespace for your MGC configs:
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#aws","title":"AWS:","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: mgc-aws-credentials\n namespace: multi-cluster-gateways\ntype: \"kuadrant.io/aws\"\nstringData:\n AWS_ACCESS_KEY_ID: ${KUADRANT_AWS_ACCESS_KEY_ID}\n AWS_SECRET_ACCESS_KEY: ${KUADRANT_AWS_SECRET_ACCESS_KEY}\n AWS_REGION: ${KUADRANT_AWS_REGION}\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#gcp","title":"GCP","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: mgc-gcp-credentials\n namespace: multi-cluster-gateways\ntype: \"kuadrant.io/gcp\"\nstringData:\n GOOGLE: ${GOOGLE}\n PROJECT_ID: ${PROJECT_ID}\nEOF\n
Create a ManagedZone
using the commands below:
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#aws_1","title":"AWS:","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: mgc-dev-mz\n namespace: multi-cluster-gateways\nspec:\n id: ${KUADRANT_AWS_DNS_PUBLIC_ZONE_ID}\n domainName: ${KUADRANT_ZONE_ROOT_DOMAIN}\n description: \"Dev Managed Zone\"\n dnsProviderSecretRef:\n name: mgc-aws-credentials\nEOF\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#gcp_1","title":"GCP","text":"cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: mgc-dev-mz\n namespace: multi-cluster-gateways\nspec:\n id: ${ZONE_NAME}\n domainName: ${ZONE_DNS_NAME}\n description: \"Dev Managed Zone\"\n dnsProviderSecretRef:\n name: mgc-gcp-credentials\nEOF\n
Verify that the ManagedZone
has been created and is in a ready state:
kubectl get managedzone -n multi-cluster-gateways --context $HUB_CLUSTER\n
NAME DOMAIN NAME ID RECORD COUNT NAMESERVERS READY\nmgc-dev-mz ef.hcpapps.net /hostedzone/Z06419551EM30QQYMZN7F 2 [\"ns-1547.awsdns-01.co.uk\",\"ns-533.awsdns-02.net\",\"ns-200.awsdns-25.com\",\"ns-1369.awsdns-43.org\"] True\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#creating-a-cert-issuer","title":"Creating a Cert Issuer","text":"Create a ClusterIssuer
to be used with cert-manager
. For simplicity, we will create a self-signed cert issuer here, but other issuers can also be configured.
cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: mgc-ca\n namespace: cert-manager\nspec:\n selfSigned: {}\nEOF\n
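As an example of another issuer, a hedged sketch of an ACME (Let's Encrypt) ClusterIssuer is shown below; the name, email and solver configuration are placeholders you would replace for your environment:
cat <<EOF | kubectl apply -f - --context $HUB_CLUSTER\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-staging\nspec:\n acme:\n # Let's Encrypt staging endpoint; switch to the production URL once validated\n server: https://acme-staging-v02.api.letsencrypt.org/directory\n email: admin@example.com # placeholder email\n privateKeySecretRef:\n name: letsencrypt-staging-account-key\n solvers:\n - http01:\n ingress:\n class: nginx # placeholder ingress class\nEOF\n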
Verify that the clusterIssuer
is ready:
kubectl wait --timeout=5m -n cert-manager clusterissuer/mgc-ca --for=condition=Ready --context $HUB_CLUSTER\n
clusterissuer.cert-manager.io/mgc-ca condition met\n
"},{"location":"multicluster-gateway-controller/docs/installation/control-plane-installation/#next-steps","title":"Next Steps","text":"Now that you have MGC installed and configured in your hub cluster, you can now continue with any of these follow-on guides:
- Installing the Kuadrant Service Protection components
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/","title":"Installing Kuadrant Service Protection into an existing OCM Managed Cluster","text":""},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#introduction","title":"Introduction","text":"This walkthrough will show you how to install and setup the Kuadrant Operator into an OCM Managed Cluster.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#prerequisites","title":"Prerequisites","text":" - Access to an Open Cluster Management(OCM) (>= v0.11.0) Managed Cluster, which has already been bootstrapped and registered with a hub cluster
- We have a guide which covers this in detail
- For more information on OCM also see:
- OCM quick start
- Managed cluster
- Kubectl (>= v1.14.0)
- OLM installed on the ManagedCluster where you want to run the Kuadrant Service Protection components
- For installation guides please see:
- Operator-sdk
- OLM
- Istio operator v1.20.0 installed on the spoke clusters
- Please see install guide here
- Gateway API v1
- To install please use:
kubectl apply -f \"https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml\"
- For more information please see: GatewayAPI DOCs
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#install-the-kuadrant-ocm-add-on","title":"Install the Kuadrant OCM Add-On","text":"To install the Kuadrant Service Protection components into a spoke ManagedCluster
, target your OCM Hub cluster with kubectl
and run:
kubectl apply -k \"github.com/kuadrant/multicluster-gateway-controller.git/config/service-protection-install-guide?ref=release-0.3\" -n namespace-of-your-managed-spoke-cluster-on-the-hub\n
The above command will install the ManagedClusterAddOn
resource needed to install the Kuadrant addon into the namespace representing a spoke cluster, and install the Kuadrant data-plane components into the open-cluster-management-agent-addon
namespace.
The Kuadrant addon will install:
- Kuadrant Operator
- Limitador (and its associated operator)
- Authorino (and its associated operator)
For more details, see the Kuadrant components installed by the kuadrant-operator
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#olm-and-openshift-catalogsource","title":"OLM and OpenShift CatalogSource","text":"The Kuadrant OCM (Open Cluster Management) Add-On depends on the Operator Lifecycle Manager (OLM)'s CatalogSource
. By default, this is set to olm/operatorhubio-catalog
.
In OpenShift environments, OLM comes pre-installed. However, it is configured to use the openshift-marketplace/community-operators
CatalogSource by default, not the olm/operatorhubio-catalog
.
To align the Kuadrant add-on with the OpenShift default CatalogSource, you can patch the add-on's CatalogSource configuration. Run the following command (note it needs to be run for each managed cluster where the add-on is installed):
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"CatalogSource\":\"community-operators\", \"CatalogSourceNS\":\"openshift-marketplace\"}' -n managed-cluster-ns\n
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#existing-istio-installations-and-changing-the-default-istio-operator-name","title":"Existing Istio installations and changing the default Istio Operator name","text":"In the case where you have an existing Istio installation on a cluster, you may encounter an issue where the Kuadrant Operator expects Istio's Operator to be named istiocontrolplane
.
The istioctl
command saves the IstioOperator CR that was used to install Istio in a copy of the CR named installed-state
.
To let the Kuadrant operator use this existing installation, set the following:
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"IstioOperator\":\"installed-state\"}' -n <managed spoke cluster>
This will propagate down and update the Kuadrant Operator, used by the Kuadrant OCM Addon.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#changing-the-name-of-the-channel-to-use","title":"Changing the name of the channel to use","text":"If you want to use a different channel with the ManagedClusterAddon
to install the kuadrant operator. You can do so by overriding the channel with the follow annotation:
kubectl annotate managedclusteraddon kuadrant-addon \"addon.open-cluster-management.io/values\"='{\"CatalogSourceNS\":\"openshift-marketplace\", \"CatalogSource\":\"community-operators\", \"Channel\":\"preview\"}' -n managed-cluster-ns
This will propagate down and update the Kuadrant Subscription, used by OLM in the spoke.
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#verify-the-kuadrant-addon-installation","title":"Verify the Kuadrant addon installation","text":"To verify the Kuadrant OCM addon has installed currently, run:
kubectl wait --timeout=5m -n kuadrant-system kuadrant/kuadrant-sample --for=condition=Ready\n
You should see the namespace kuadrant-system
, and the following pods come up:
- authorino-value
- authorino-operator-value
- kuadrant-operator-controller-manager-value
- limitador-value
- limitador-operator-controller-manager-value
"},{"location":"multicluster-gateway-controller/docs/installation/service-protection-installation/#further-reading","title":"Further Reading","text":"With the Kuadrant data plane components installed, here is some further reading material to help you utilise Authorino and Limitador:
Getting started with Authorino Getting started With Limitador
"},{"location":"multicluster-gateway-controller/docs/proposals/","title":"Index","text":""},{"location":"multicluster-gateway-controller/docs/proposals/#proposals","title":"Proposals","text":"This directory contains proposals accepted into the MGC. The template for add a proposal is located in this directory. Make a copy of the template and use it to define your own proposal.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/","title":"DNS Policy","text":""},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#problem","title":"Problem","text":"Gateway admins, need a way to define the DNS policy for a gateway distributed across multiple clusters in order to control how much and which traffic reaches these gateways. Ideally we would allow them to express a strategy that they want to use without needing to get into the details of each provider and needing to create and maintain dns record structure and individual records for all the different gateways that may be within their infrastructure.
Use Cases
As a gateway admin, I want to be able to reduce latency for my users by routing traffic based on the GEO location of the client. I want this strategy to automatically expand and adjust as my gateway topology grows and changes.
As a gateway admin, I have a discount with a particular cloud provider and want to send more of my traffic to the gateways hosted in that providers infrastructure and as I add more gateways I want that balance to remain constant and evolve to include my new gateways.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#goals","title":"Goals","text":" - Allow definition of a DNS load balancing strategy to decide how traffic should be weighted across multiple gateway instances from the central control plane.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#none-goals","title":"None Goals","text":" - Allow different DNS policies for different listeners. Although this may be something we look to support in the future, currently policy attachment does not allow for this type of targeting. This means a DNSPolicy is applied for the whole gateway currently.
- Define how health checks should work, this will be part of a separate proposal
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#terms","title":"Terms","text":" - managed listener: This is a listener with a host backed by a DNS zone managed by the multi-cluster gateway controller
- hub cluster: control plane cluster that managed 1 or more spokes
- spoke cluster: a cluster managed by the hub control plane cluster. This is where gateway are instantiated
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#proposal","title":"Proposal","text":"Provide a control plane DNSPolicy API that uses the idea of direct policy attachment from gateway API that allows a load balancing strategy to be applied to the DNS records structure for any managed listeners being served by the data plane instances of this gateway. The DNSPolicy also covers health checks that inform the DNS response but that is not covered in this document.
Below is a draft API for what we anticipate the DNSPolicy to look like
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n health:\n ...\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom: #optional\n\n - value: AWS #optional with both GEO and weighted. With GEO the custom weight is applied to gateways within a Geographic region\n weight: 10\n - value: GCP\n weight: 20\n GEO: #optional\n defaultGeo: IE # required with GEO. Chooses a default DNS response when no particular response is defined for a request from an unknown GEO.\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#available-load-balancing-strategies","title":"Available Load Balancing Strategies","text":"GEO and Weighted load balancing are well understood strategies and this API effectively allow a complex requirement to be expressed relatively simply and executed by the gateway controller in the chosen DNS provider. Our default policy will execute a \"Round Robin\" weighted strategy which reflects the current default behaviour.
With the above API we can provide weighted and GEO and weighted within a GEO. A weighted strategy with a minimum of a default weight is always required and the simplest type of policy. The multi-cluster gateway controller will set up a default policy when a gateway is discovered (shown below). This policy can be replaced or modified by the user. A weighted strategy can be complimented with a GEO strategy IE they can be used together in order to provide a GEO and weighted (within a GEO) load balancing. By defining a GEO section, you are indicating that you want to use a GEO based strategy (how this works is covered below).
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: default-policy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted: # required\n defaultWeight: 10 #required, all records created get this weight\n health:\n ... \n
In order to provide GEO based DNS and allow customisation of the weighting, we need some additional information to be provided by the gateway / cluster admin about where this gateway has been placed. For example if they want to use GEO based DNS as a strategy, we need to know what GEO identifier(s) to use for each record we create and a default GEO to use as a catch-all. Also, if the desired load balancing approach is to provide custom weighting and no longer simply use Round Robin, we will need a way to identify which records to apply that custom weighting to based on the clusters the gateway is placed on.
To solve this we will allow two new attributes to be added to the ManagedCluster
resource as labels:
kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
These two labels allow setting values in the DNSPolicy that will be reflected into DNS records for gateways placed on that cluster depending on the strategies used. (see the first DNSPolicy definition above to see how these values are used) or take a look at the examples at the bottom.
example :
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\nspec: \n
The attributes provide the key and value we need in order to understand how to define records for a given LB address based on the DNSPolicy targeting the gateway.
The kuadrant.io/lb-attribute-geo-code
attribute value is provider specific, using an invalid code will result in an error status condition in the DNSrecord resource.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#dns-record-structure","title":"DNS Record Structure","text":"This is an advanced topic and so is broken out into its own proposal doc DNS Record Structure
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#custom-weighting","title":"Custom Weighting","text":"Custom weighting will use the associated custom-weight
attribute set on the ManagedCluster
to decide which records should get a specific weight. The value of this attribute is up to the end user.
example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
The above is then used in the DNSPolicy to set custom weights for the records associated with the target gateway.
- value: GCP\n weight: 20\n
So any gateway targeted by a DNSPolicy with the above definition that is placed on a ManagedCluster
with the kuadrant.io/lb-attribute-custom-weight
set with a value of GCP will get an A record with a weight of 20
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#status","title":"Status","text":"DNSPolicy should have a ready condition that reflect that the DNSRecords have been created and configured as expected. In the case that there is an invalid policy, the status message should reflect this and indicate to the user that the old DNS has been preserved.
We will also want to add a status condition to the gateway status indicating it is effected by this policy. Gateway API recommends the following status condition
- type: gateway.networking.k8s.io/PolicyAffected\n status: True \n message: \"DNSPolicy has been applied\"\n reason: PolicyApplied\n ...\n
https://github.com/kubernetes-sigs/gateway-api/pull/2128/files#diff-afe84021d0647e83f420f99f5d18b392abe5ec82d68f03156c7534de9f19a30aR888
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#example-policies","title":"Example Policies","text":""},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#round-robin-the-default-policy","title":"Round Robin (the default policy)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: RoundRobinPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#geo-round-robin","title":"GEO (Round Robin)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNS\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n GEO:\n defaultGeo: IE\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#custom","title":"Custom","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: SendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure #any record associated with a gateway on a cluster without this value gets the default\n weight: 30\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#geo-with-custom-weights","title":"GEO with Custom Weights","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNSAndSendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure\n weight: 30\n GEO:\n defaultGeo: IE\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#considerations-and-limitations","title":"Considerations and Limitations","text":"You cannot have a different load balancing strategy for each listener within a gateway. So in the following gateway definition
spec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n hostname: myapp.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS\n - allowedRoutes:\n namespaces:\n from: All\n hostname: other.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS \n
The DNS policy targeting this gateway will apply to both myapp.hcpapps.net and other.hcpapps.net
However, there is still significant value even with this limitation. This limitation is something we will likely revisit in the future
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSPolicy/#background-docs","title":"Background Docs","text":"DNS Provider Support
AWS DNS
Google DNS
Azure DNS
Direct Policy Attachment
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/","title":"DNSRecordStructure","text":"DNSRecord is our API for expressing DNS endpoints via a kube CRD based API. It is managed by the multi-cluster gateway controller based on the desired state expressed in higher level APIs such as the Gateway or a DNSPolicy. In order to provide our feature set, we need to carefully consider how we structure our records and the types of records we need. This document proposes a particular structure based on the requirements and feature set we have.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#requirements","title":"Requirements","text":"We want to be able to support Gateway definitions that use the following listener definitions:
- wildcard:
*.example.com
and fully qualified listener host www.example.com
definitions with the notable exception of fully wildcarded ie *
as we cannot provide any DNS or TLS for something with no defined hostname. - listeners that have HTTPRoute defined on less than all the clusters where the listener is available. IE we don't want to send traffic to clusters where there is no HTTPRoute attached to the listener.
- Gateway instances that provide IPs that are deployed alongside instances on different infra that provide host names causing the addresses types on each of gateway instance to be different (IPAddress or HostAddress).
- We want to provide GEO based DNS as a feature of DNSPolicy and so our DNSRecord structure must support this.
- We want to offer default weighted and custom weighted DNS as part of DNSPolicy
- We want to allow root or apex domain to be used as listener hosts
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#diagram","title":"Diagram","text":"https://lucid.app/lucidchart/2f95c9c9-8ddf-4609-af37-48145c02ef7f/edit?viewport_loc=-188%2C-61%2C2400%2C1183%2C0_0&invitationId=inv_d5f35eb7-16a9-40ec-b568-38556de9b568
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#proposal","title":"Proposal","text":"For each listener defined in a gateway, we will create a set of records with the following rules.
none apex domain:
We will have a generated lb (load balancer) dns name that we will use as a CNAME for the listener hostname. This DNS name is not intended for use within a HTTPRoute but is instead just a DNS construct. This will allow us to set up additional CNAME records for that DNS name in the future that are returned based a GEO location. These DNS records will also be CNAMES pointing to specific gateway dns names, this will allow us to setup a weighted response. So the first layer CNAME handles balancing based on geo, the second layer handles balancing based on weighting.
shop.example.com\n | |\n (IE) (AUS)\n CNAME lb.shop.. lb.shop..\n | | | |\n (w 100) (w 200) (w 100) (w100)\n CNAME g1.lb.. g2.lb.. g3.lb.. g4.lb..\n A 192.. A 81.. CNAME aws.lb A 82..\n
When there is no geo strategy defined within the DNSPolicy, we will put everything into a default geo (IE a catch-all record) default.lb-{guid}.{listenerHost}
but set the routing policy to GEO allowing us to add more geo based records in the future if the gateway admin decides to move to a geo strategy as their needs grow.
To ensure this lb dns name is unique and does not clash we will use a short guid as part of the subdomain so lb-{guid}.{listenerHost}.
this guid will be based on the gateway name and gateway namespace in the control plane.
For a geo strategy we will add a geo record with a prefix to the lb subdomain based on the geo code. When there is no geo we will use default
as the prefix. {geo-code}.lb-{guid}.{listenerHost}
. Finally, for each gateway instance on a target cluster we will add a {spokeClusterName}.lb-{guid}.{listenerHost}
To allow for a mix of hostname and IP address types, we will always use a CNAME . So we will create a dns name for IPAddress with the following structure: {guid}.lb-{guid}.{listenerHost}
where the first guid will be based on the cluster name where the gateway is placed.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#apex-domains","title":"Apex Domains","text":"An apex domain is the domain at the apex or root of a zone. These are handled differently by DNS as they often have NS and SOA records. Generally it is not possible to set up a CNAME for apex domain (although some providers allow it).
If a listener is added to a gateway that is an apex domain, we can only add A records for that domain to keep ourselves compliant with as many providers as possible. If a listener is the apex domain, we will setup A records for that domain (favouring gateways with an IP address or resolving the IP behind a host) but there will be no special balancing/weighting done. Instead, we will expect that the owner of that will setup a HTTPRoute with a 301 permanent redirect sending users from the apex domain e.g. example.com to something like: www.example.com where the www subdomain based listener would use the rules of the none apex domains and be where advanced geo and weighted strategies are applied.
- gateway listener host name : example.com
- example.com A 81.17.241.20
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#geo-agnostic-everything-is-in-a-default-geo-catch-all","title":"Geo Agnostic (everything is in a default * geo catch all)","text":"This is the type of DNS Record structure that would back our default DNSPolicy.
-
gateway listener host name : www.example.com
DNSRecords:
- www.example.com CNAME lb-1ab1.www.example.com
- lb-1ab1.www.example.com CNAME geolocation * default.lb-1ab1.www.example.com
- default.lb-1ab1.www.example.com CNAME weighted 100 1bc1.lb-1ab1.www.example.com
- default.lb-1ab1.www.example.com CNAME weighted 100 aws.lb.com
- 1bc1.lb-1ab1.www.example.com A 192.22.2.1
So in the above example working up from the bottom, we have a mix of hostname and IP based addresses for the gateway instance. We have 2 evenly weighted records that balance between the two available gateways, then next we have the geo based record that is set to a default catch all as no geo has been specified then finally we have the actual listener hostname that points at our DNS based load balancer name.
DNSRecord Yaml
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n name: {gateway-name}-{listenerName}\n namespace: multi-cluster-gateways\nspec:\n dnsName: www.example.com\n managedZone:\n name: mgc-dev-mz\n endpoints:\n\n - dnsName: www.example.com\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-1ab1.www.example.com\n - dnsName: lb-1ab1.www.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: mygateway-multicluster-gateways\n providerSpecific:\n - name: \"geolocation-country-code\"\n value: \"*\"\n targets:\n - default.lb-1ab1.www.example.com\n - dnsName: default.lb-1ab1.www.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: cluster1\n providerSpecific:\n - name: \"weight\"\n value: \"100\"\n targets:\n - 1bc1.lb-1ab1.www.example.com\n - dnsName: default.lb-a1b2.shop.example.com\n recordTTL: 300\n recordType: CNAME\n setIdentifier: cluster2\n providerSpecific:\n - name: \"weight\"\n value: \"100\"\n targets:\n - aws.lb.com\n - dnsName: 1bc1.lb-1ab1.www.example.com\n recordTTL: 60\n recordType: A\n targets:\n - 192.22.2.1\n
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#geo-specific","title":"geo specific","text":"Once the end user selects to use a geo strategy via the DNSPolicy, we then need to restructure our DNS to add in our geo specific records. Here the default record
lb short code is {gw name + gw namespace} gw short code is {cluster name}
-
gateway listener host : shop.example.com
DNSRecords:
- shop.example.com CNAME lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com CNAME geolocation ireland ie.lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com geolocation australia aus.lb-a1b2.shop.example.com
- lb-a1b2.shop.example.com geolocation default ie.lb-a1b2.shop.example.com (set by the default geo option)
- ie.lb-a1b2.shop.example.com CNAME weighted 100 ab1.lb-a1b2.shop.example.com
- ie.lb-a1b2.shop.example.com CNAME weighted 100 aws.lb.com
- aus.lb-a1b2.shop.example.com CNAME weighted 100 ab2.lb-a1b2.shop.example.com
- aus.lb-a1b2.shop.example.com CNAME weighted 100 ab3.lb-a1b2.shop.example.com
- ab1.lb-a1b2.shop.example.com A 192.22.2.1 192.22.2.5
- ab2.lb-a1b2.shop.example.com A 192.22.2.3
- ab3.lb-a1b2.shop.example.com A 192.22.2.4
In the above example we move from a default catch all to geo specific setup. Based on a DNSPolicy that specifies IE as the default geo location. We leave the default
subdomain in place to allow for clients that may still be using that and set up geo specific subdomains that allow us to route traffic based on its origin. In this example we are load balancing across 2 geos and 4 clusters
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#wildcards","title":"WildCards","text":"In the examples we have used fully qualified domain names, however sometimes it may be required to use a wildcard subdomain. example:
- gateway listener host : *.example.com
To support these we need to change the name of the DNSRecord away from the name of the listener as the k8s resource does not allow * in the name.
To do this we will set the dns record resource name to be a combination of {gateway-name}-{listenerName}
to keep a record of the host this is for we will set a top level property named dnsName
. You can see an example in the DNSRecord above.
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#pros","title":"Pros","text":"This setup allows us a powerful set of features and flexibility
"},{"location":"multicluster-gateway-controller/docs/proposals/DNSRecordStructure/#cons","title":"Cons","text":"With this CNAME based approach we are increasing the number of DNS lookups required to get to an IP which will increase the cost and add a small amount of latency. To counteract this, we will set a reasonably high TTL (at least 5 mins) for our CNAMES and (2 mins) for A records
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/","title":"Multiple DNS Provider Support","text":"Authors: Michael Nairn @mikenairn
Epic: https://github.com/Kuadrant/multicluster-gateway-controller/issues/189
Date: 25th May 2023
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#job-stories","title":"Job Stories","text":" - As a developer, I want to use MGC with a domain hosted in one of the major cloud DNS providers (Google Cloud DNS, Azure DNS or AWS Route53)
- As a developer, I want to use multiple domains with a single instance of MGC, each hosted on different cloud providers
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#goals","title":"Goals","text":" - Add ManagedZone and DNSRecord support for Google Cloud DNS
- Add ManagedZone and DNSRecord support for Azure DNS
- Add DNSRecord support for CoreDNS (Default for development environment)
- Update ManagedZone and DNSRecord support for AWS Route53
- Add support for multiple providers with a single instance of MGC
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#non-goals","title":"Non Goals","text":" - Support for every DNS provider
- Support for health checks
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#current-approach","title":"Current Approach","text":"Currently, MGC only supports AWS Route53 as a dns provider. A single instance of a DNSProvider resource is created per MGC instance which is configured with AWS config loaded from the environment. This provider is loaded into all controllers requiring dns access (ManagedZone and DNSRecord reconciliations), allowing a single instance of MGC to operate against a single account on a single dns provider.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#proposed-solution","title":"Proposed Solution","text":"MGC has three features it requires of any DNS provider in order to offer full support, DNSRecord management, Zone management and DNS Health checks. We do not however want to limit to providers that only offer this functionality, so to add support for a provider the minimum that provider should offer is API access to managed DNS records. MGC will continue to provide Zone management and DNS Health checks support on a per-provider basis.
Support will be added for AWS (Route53), Google (Google Cloud DNS) and Azure, with an investigation into possibly adding CoreDNS (intended for local dev purposes), with the following proposed initial support:
Provider | DNS Records | DNS Zones | DNS Health\nAWS Route53 | X | X | X\nGoogle Cloud DNS | X | X | -\nAzureDNS | X | X | -\nCoreDNS | X | - | -\n Add DNSProvider as an API for MGC which contains all the required config for that particular provider, including the credentials. This can be thought of in a similar way to a cert-manager Issuer. Update ManagedZone to add a reference to a DNSProvider. This will be a required field on the ManagedZone, and a DNSProvider must exist before a ManagedZone can be created. Update all controllers to load the DNSProvider directly from the ManagedZone during reconciliation loops and remove the single controller-wide instance. Add new provider implementations for Google, Azure and CoreDNS.
* All provider constructors should accept a single struct containing all required config for that particular provider.\n* Providers must be configured from credentials passed in the config and not rely on environment variables.\n
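As an illustration only (the DNSProvider and ManagedZone kinds follow the proposal, but the field names below are assumptions rather than a finalised schema), the pairing could look like:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSProvider\nmetadata:\n  name: aws-route53-prod\nspec:\n  # provider specific config, with credentials passed in directly (hypothetical fields)\n  aws:\n    credentialsSecretRef:\n      name: aws-credentials\n---\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n  name: example-com\nspec:\n  domainName: example.com\n  # required reference; the DNSProvider must exist before the ManagedZone is created\n  dnsProviderRef:\n    name: aws-route53-prod\n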
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#other-solutions-investigated","title":"Other Solutions investigated","text":"Investigation was carried out into the suitability of [External DNS] (https://github.com/kubernetes-sigs/external-dns) as the sole means of managing dns resources. Unfortunately, while external dns does offer support for basic dns record management with a wide range of providers, there were too many features missing making it unsuitable at this time for integration.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#external-dns-as-a-separate-controller","title":"External DNS as a separate controller","text":"Run external dns, as intended, as a separate controller alongside mgc, and pass all responsibility for reconciling DNSRecord resources to it. All DNSRecord reconciliation is removed from MGC.
Issues:
- A single instance of External DNS will only work with a single provider and a single set of credentials. As it is, in order to support more than a single provider, more than one External DNS instance would need to be created, one for each provider/account pair.
- Geo and Weighted routing policies are not implemented for any provider other than AWS Route53.
- Only supports basic DNS record management (A, CNAME, NS records, etc.), with no support for managed zones or health checks.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#external-dns-as-a-module-dependency","title":"External DNS as a module dependency","text":"Add external dns as a module dependency in order to make use of their DNS Providers, but continue to reconcile DNSRecords in MGC.
Issues:
- External DNS providers all create clients using the current environment. This would require extensive refactoring in order to modify each provider to optionally be constructed using static credentials.
- Clients are all internal, making it impossible, without modification, to use the upstream code to extend the provider behaviour to support additional functionality such as managed zone creation.
"},{"location":"multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/#checklist","title":"Checklist","text":" - [ ] An epic has been created and linked to
- [ ] Reviewers have been added. It is important that the right reviewers are selected.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/","title":"Provider agnostic DNS Health checks","text":""},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#introduction","title":"Introduction","text":"The MGC has the ability to extend the DNS configuration of the gateway with the DNSPolicy resource. This resource allows users to configure health checks. As a result of configuring health checks, the controller creates the health checks in Route53, attaching them to the related DNS records. This has the benefit of automatically disabling an endpoint if it becomes unhealthy, and enabling it again when it becomes healthy again.
This feature has a few shortfalls:
- It\u2019s tightly coupled with Route53. If other DNS providers are to be supported, they must either provide a similar feature, or health checks will not be supported.
- Lacks the ability to reach endpoints in private networks.
- Requires the gateway controller to implement, maintain and test health checks against multiple providers.
This document describes a proposal to extend the current health check implementation to overcome these shortfalls.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#goals","title":"Goals","text":" - Ability to configure health checks in the DNSPolicy associated to a Gateway
- DNS records are disabled when the associated health check fails
- Current status of the defined health checks is visible to the end user
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#nongoals","title":"Nongoals","text":" - Ability for the health checks to reach endpoints in separate private networks
- Transparently keep support for other health check providers like Route53
- Having health checks for wildcard listeners
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#use-cases","title":"Use-cases","text":" - As a gateway administrator, I would like to define a health check that each service sitting behind a particular listener across the production clusters has to implement to ensure we can automatically respond, failover and mitigate a failing instance of the service
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#proposal","title":"Proposal","text":"Currently, this functionality will be added to the existing MGC, and executed within that component. This will be created with the knowledge that it may need to be made into an external component in the future.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnspolicy-resource","title":"DNSPolicy
resource","text":"The presence of the healthCheck
means that for every DNS endpoint (that is either an A record, or a CNAME to an external host), a health check is created based on the health check configuration in the DNSPolicy.
A failureThreshold
field will be added to the health spec, allowing users to configure a number of consecutive health check failures that must be observed before the endpoint is considered unhealthy.
Example DNS Policy with a defined health check.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n healthCheck:\n endpoint: /health\n failureThreshold: 5\n port: 443\n protocol: https\n additionalHeaders: <SecretRef>\n expectedResponses:\n\n - 200\n - 301\n - 302\n - 407\n AllowInsecureCertificates: true\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: prod-web\n namespace: multi-cluster-gateways\n
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnshealthcheckprobe-resource","title":"DNSHealthCheckProbe
resource","text":"The DNSHealthCheckProbe resource configures a health probe in the controller to perform the health checks against an identified final A or CNAME endpoint. When created by the controller as a result of a DNS Policy, this will have an owner ref of the DNS Policy that caused it to be created.
apiVersion: kuadrant.io/v1alpha1\nkind: DNSHealthCheckProbe\nmetadata:\n name: example-probe\nspec:\n port: \"...\"\n host: \"...\"\n address: \"...\"\n path: \"...\"\n protocol: \"...\"\n interval: \"...\"\n additionalHeaders: <SecretRef>\n expectedResponses:\n - 200\n - 201\n - 301\n AllowInsecureCertificate: true\nstatus:\n healthy: true\n consecutiveFailures: 0\n reason: \"\"\n lastCheck: \"...\"\n
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#spec-fields-definition","title":"Spec Fields Definition","text":" - Port The port to use
- Address The address to connect to (e.g. IP address or hostname of a cluster's load balancer)
- Host The host to request in the Host header
- Path The path to request
- Protocol The protocol to use for this request
- Interval How frequently this check would ideally be executed.
- AdditionalHeaders Optional secret ref containing key/value pairs of headers and their values that can be specified to ensure the health check is successful.
- ExpectedResponses Optional HTTP response codes that should be considered healthy (defaults are 200 and 201).
- AllowInsecureCertificate Optional flag to allow using invalid (e.g. self-signed) certificates, default is false.
The reconciliation of this resource results in the configuration of a health probe, which targets the endpoint and updates the status. The status is propagated to the providerSpecific status of the equivalent endpoint in the DNSRecord.
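As a rough illustration, the propagated health state on an endpoint could be surfaced like this (the providerSpecific property names here are assumptions, not a finalised schema):
spec:\n  endpoints:\n    - dnsName: cluster1-gw1.hcpapps.net\n      recordType: CNAME\n      targets:\n        - cluster1-gw1.aws.com\n      # health state propagated from the DNSHealthCheckProbe status (illustrative names)\n      providerSpecific:\n        - name: "health-check/healthy"\n          value: "false"\n        - name: "health-check/consecutive-failures"\n          value: "5"\n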
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#changes-to-current-controllers","title":"Changes to current controllers","text":"In order to support this new feature, the following changes in the behaviour of the controllers are proposed.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnspolicy-controller","title":"DNSPolicy controller","text":"Currently, the reconciliation loop of this controller creates health checks in the configured DNS provider (Route53 currently) based on the spec of the DNSPolicy, separately from the reconciliation of the DNSRecords. The proposed change is to reconcile health check probe CRs based on the combination of DNS Records and DNS Policies.
Instead of Route53 health checks, the controller will create DNSHealthCheckProbe
resources.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dnsrecord-controller","title":"DNSRecord controller","text":"When reconciling a DNS Record, the DNS Record reconciler will retrieve the relevant DNSHealthCheckProbe CRs, and consult the status of them when determining what value to assign to a particular endpoint's weight.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#dns-record-structure-diagram","title":"DNS Record Structure Diagram:","text":"https://lucid.app/lucidchart/2f95c9c9-8ddf-4609-af37-48145c02ef7f/edit?viewport_loc=-188%2C-61%2C2400%2C1183%2C0_0&invitationId=inv_d5f35eb7-16a9-40ec-b568-38556de9b568 How
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removing-unhealthy-endpoints","title":"Removing unhealthy Endpoints","text":"When a DNS health check probe is failing, it will update the DNS Record CR with a custom field on that endpoint to mark it as failing.
There are then three scenarios we need to consider: (1) all endpoints are healthy; (2) all endpoints are unhealthy; (3) some endpoints are healthy and some are unhealthy.
In cases 1 and 2, the result should be the same: all records are published to the DNS Provider.
When scenario 3 is encountered the following process should be followed:
For each gateway IP or CNAME: this should be omitted if unhealthy.\nFor each managed gateway CNAME: This should be omitted if all child records are unhealthy.\nFor each GEO CNAME: This should be omitted if all the managed gateway CNAMEs have been omitted.\nLoad balancer CNAME: This should never be omitted.\n
If we consider the DNS record to be a hierarchy of parents and children, then whenever any parent has no healthy children that parent is also considered unhealthy. No unhealthy elements are to be included in the DNS Record.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removal-process","title":"Removal Process","text":"When removing DNS records, we will want to avoid any NXDOMAIN
responses from the DNS service as this will cause the resolver to cache this missed domain for a while (30 minutes or more). The NXDOMAIN
response is triggered when the resolver attempts to resolve a host that does not have any records in the zone file.
The situation that would cause this to occur is when we have removed a record but still refer to it from other records.
As we wish to avoid any NXDOMAIN
responses from the nameserver, which would cause the resolver to cache the missed response, we will need to ensure that any time a DNS record (CNAME or A) is removed, we also remove any records that refer to the removed record (e.g. when the gateway A record is removed, we will need to remove the managed gateway CNAME that refers to that A record).
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#removal-example","title":"Removal Example","text":"Given the following DNS Records (simplified hosts used in example):
01 host.example.com. 300 IN CNAME lb.hcpapps.net.\n02 lb.hcpapps.net. 60 IN CNAME default-geo.hcpapps.net.\n03 default-geo.hcpapps.net. 120 IN CNAME cluster1.hcpapps.net.\n04 default-geo.hcpapps.net. 120 IN CNAME cluster2.hcpapps.net.\n05 cluster1.hcpapps.net. 300 IN CNAME cluster1-gw1.hcpapps.net.\n06 cluster1.hcpapps.net. 300 IN CNAME cluster1-gw2.hcpapps.net.\n07 cluster2.hcpapps.net. 300 IN CNAME cluster2-gw1.hcpapps.net.\n08 cluster2.hcpapps.net. 300 IN CNAME cluster2-gw2.hcpapps.net.\n09 cluster1-gw1.hcpapps.net. 60 IN CNAME cluster1-gw1.aws.com.\n10 cluster1-gw2.hcpapps.net. 60 IN CNAME cluster1-gw2.aws.com.\n11 cluster2-gw1.hcpapps.net. 60 IN CNAME cluster2-gw1.aws.com.\n12 cluster2-gw2.hcpapps.net. 60 IN CNAME cluster2-gw2.aws.com.\n
Cases: - Record 09 becomes unhealthy: remove records 09 and 05.
- Records 09 and 10 become unhealthy: remove records 09, 10, 05, 06 and 03.
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#further-reading","title":"Further reading","text":"Domain Names RFC: https://datatracker.ietf.org/doc/html/rfc1034
"},{"location":"multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/#executing-the-probes","title":"Executing the probes","text":"There will be a DNSHealthCheckProbe CR controller added to the controller. This controller will create an instance of a HealthMonitor
, the HealthMonitor ensures that each DNSHealthCheckProbe CR has a matching probeQueuer object running. It will also handle both the updating of the probeQueuer on CR update and the removal of probeQueuers, when a DNSHealthcheckProbe is removed.
The ProbeQueuer
will add a health check request to a queue based on a configured interval, this queue is consumed by a ProbeWorker
, probeQueuers work on their own goroutine.
The ProbeWorker is responsible for actually executing the probe, and updating the DNSHealthCheckProbe CR status. The probeWorker executes on its own goroutine.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/","title":"Proposal: Aggregation of Status Conditions","text":""},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#background","title":"Background","text":"Status conditions are used to represent the current state of a resource and provide information about any problems or issues that might be affecting it. They are defined as an array of Condition objects within the status section of a resource's YAML definition.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#problem-statement","title":"Problem Statement","text":"When multiple instances of a resource (e.g. a Gateway) are running across multiple clusters, it can be difficult to know the current state of each instance without checking each one individually. This can be time-consuming and error-prone, especially when there are a large number of clusters or resources.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#proposal","title":"Proposal","text":"To solve this problem, I'm proposing we leverage the status block in the control plane instance of that resource, aggregating the statuses to convey the necessary information.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#status-conditions","title":"Status Conditions","text":"For example, if the Ready
status condition type of a Gateway
is True
for all instances of the Gateway
resource across all clusters, then the Gateway
in the control plane will have the Ready
status condition type also set to True
.
status:\n conditions:\n\n - type: Ready\n status: True\n message: All listeners are valid\n
If the Ready
status condition type of some instances is not True
, the Ready
status condition type of the Gateway
in the control plane will be False
.
status:\n conditions:\n\n - type: Ready\n status: False\n
In addition, if the Ready
status condition type is False
, the Gateway
in the control plane should include a status message for each Gateway
instance where Ready
is False
. This message would indicate the reason why the condition is not true for each Gateway
.
status:\n conditions:\n\n - type: Ready\n status: False\n message: \"gateway-1 Listener certificate is expired; gateway-3 No listener configured for port 80\"\n
In this example, the Ready
status condition type is False
because two of the three Gateway instances (gateway-1 and gateway-3) have issues with their listeners. For gateway-1, the reason for the False
condition is that the listener certificate is expired, and for gateway-3, the reason is that no listener is configured for port 80. These reasons are included as status messages in the Gateway
resource in the control plane.
As there may be different reasons for the condition being False
across different clusters, it doesn't make sense to aggregate the reason
field. The reason
field is intended to be a programmatic identifier, while the message
field allows for a human-readable message, i.e. a semi-colon separated list of messages.
The lastTransitionTime
and observedGeneration
fields will behave as normal for the resource in the control plane.
"},{"location":"multicluster-gateway-controller/docs/proposals/status-aggregation/#addresses-and-listeners-status","title":"Addresses and Listeners status","text":"The Gateway status can include information about addresses, like load balancer IP Addresses assigned to the Gateway, and listeners, such as the number of attached routes for each listener. This information is useful at the control plane level. For example, a DNS Record should only exist as long as there is at least 1 attached route for a listener. It can also be more complicated than that when it comes to multi cluster gateways. A DNS Record should only include the IP Addresses of the Gateway instances where the listener has at least 1 attached route. This is important when initial setup of DNS Records happen as applications start. It doesn't make sense to route traffic to a Gateway where a listener isn't ready/attached yet. It also comes into play when a Gateway is displaced either due to changing placement decision or removal.
In summary, the IP Addresses and number of attached routes per listener per Gateway instance are needed in the control plane to manage DNS effectively. This proposal adds that information to the hub Gateway status block. This will ensure a decoupling of the DNS logic from the underlying resource/status syncing implementation (i.e. ManifestWork status feedback rules).
First, here are 2 instances of a multi-cluster Gateway in 2 separate spoke clusters. The YAML is shortened to highlight the status block.
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n\n - type: IPAddress\n value: 172.31.200.0\n - type: IPAddress\n value: 172.31.201.0\n listeners:\n - attachedRoutes: 0\n conditions:\n name: api\n - attachedRoutes: 1\n conditions:\n name: web\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n - type: IPAddress\n value: 172.31.202.0\n - type: IPAddress\n value: 172.31.203.0\n listeners:\n - attachedRoutes: 1\n name: api\n - attachedRoutes: 1\n name: web\n
And here is the proposed status aggregation in the hub Gateway:
apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: gateway\nstatus:\n addresses:\n\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_1/172.31.200.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_1/172.31.201.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_2/172.31.202.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: cluster_2/172.31.203.0\n listeners:\n - attachedRoutes: 0\n name: cluster_1.api\n - attachedRoutes: 1\n name: cluster_1.web\n - attachedRoutes: 1\n name: cluster_2.api\n - attachedRoutes: 1\n name: cluster_2.web\n
The MultiCluster Gateway Controller will use a custom implementation of the addresses
and listeners
fields. The address type
is of type AddressType, where the type is a domain-prefixed string identifier. The value can be split on the forward slash, /
, to give the cluster name and the underlying Gateway IPAddress value of type IPAddress. Both the IPAddress and Hostname types will be supported. The type strings for either will be kuadrant.io/MultiClusterIPAddress
and kuadrant.io/MultiClusterHostname
The listener name
is of type SectionName, with validation on allowed characters and max length of 253. The name can be split on the period, .
, to give the cluster name and the underlying listener name. As there are limits on the character length for the name
field, this puts a lower limit restriction on the cluster names and listener names used to ensure proper operation of this status aggregation. If the validation fails, a status condition showing a validation error should be included in the hub Gateway status block.
"},{"location":"multicluster-gateway-controller/docs/proposals/template/","title":"Proposal Template","text":"Authors: {authors names} Epic: {Issue of type epic this relates to} Date: {date proposed}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#job-stories","title":"Job Stories","text":"{ A bullet point list of stories this proposal solves}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#goals","title":"Goals","text":"{A bullet point list of the goals this will achieve}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#non-goals","title":"Non Goals","text":"{A bullet point list of goals that this will not achieve, IE scoping}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#current-approach","title":"Current Approach","text":"{outline the current approach if any}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#proposed-solution","title":"Proposed Solution","text":"{outline the proposed solution, links to diagrams and PRs can go here along with the details of your solution}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#testing","title":"Testing","text":"{outline any testing considerations. Does this need some form of load/performance test. Are there any considerations when thinking about an e2e test}
"},{"location":"multicluster-gateway-controller/docs/proposals/template/#checklist","title":"Checklist","text":" - [ ] An epic has been created and linked to
- [ ] Reviewers have been added. It is important that the right reviewers are selected.
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/aws/aws/","title":"AWS DNS","text":"AWS supports Weighted(Weighted Round Robin) and Geolocation routing policies https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html. Both of these can be configured directly on records in AWS route 53.
GEO Weighted
Weighted
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/azure/azure/","title":"Azure DNS","text":""},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/azure/azure/#azure","title":"Azure","text":"https://portal.azure.com/
Azure supports Weighted and Geolocation routing policies, but requires records to alias to a Traffic Manager resource that must also be created in the user's account: https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-routing-methods
Notes:
- A Traffic Manager Profile is created per record set and is created with a routing method (Weighted or Geographic) https://portal.azure.com/#view/Microsoft_Azure_Network/LoadBalancingHubMenuBlade/~/TrafficManagers
- Only a single IP can be added to a DNSRecord set. A Traffic Manager profile must be created and aliased from a DNSRecord set for anything that involves more than a single target.
- There are significantly more resources to manage in order to achieve functionality comparable with Google and AWS.
- The modelling of the records is significantly different from AWS Route53, but the current DNSRecord spec could still work. The azure implementation will have to process the endpoint list and create traffic manager policies as required to satisfy the record set.
Given the example DNSRecord here describing a record set for a geo location routing policy with four clusters, two in each of two regions (North America and Europe), the following Azure resources are required.
Three DNSRecords, each aliased to a different traffic manager:
- dnsrecord-geo-azure-hcpapps-net (dnsrecord-geo.azure.hcpapps.net) aliased to Traffic Manager Profile 1 (dnsrecord-geo-azure-hcpapps-net)
- dnsrecord-geo-na.azure-hcpapps-net (dnsrecord-geo.na.azure.hcpapps.net) aliased to Traffic Manager Profile 2 (dnsrecord-geo-na-azure-hcpapps-net)
- dnsrecord-geo-eu.azure-hcpapps-net (dnsrecord-geo.eu.azure.hcpapps.net) aliased to Traffic Manager Profile 3 (dnsrecord-geo-eu-azure-hcpapps-net)
Three Traffic Manager Profiles:
- Traffic Manager Profile 1 (dnsrecord-geo-azure-hcpapps-net): Geolocation routing policy with two region specific FQDN targets (dnsrecord-geo.eu.azure.hcpapps.net and dnsrecord-geo.na.azure.hcpapps.net).
- Traffic Manager Profile 2 (dnsrecord-geo-na-azure-hcpapps-net): Weighted routing policy with two IP address endpoints (172.31.0.1 and 172.31.0.2) with equal weighting.
- Traffic Manager Profile 3 (dnsrecord-geo-eu-azure-hcpapps-net): Weighted routing policy with two IP address endpoints (172.31.0.3 and 172.31.0.4) with equal weighting.
dig dnsrecord-geo.azure.hcpapps.net\n\n; <<>> DiG 9.18.12 <<>> dnsrecord-geo.azure.hcpapps.net\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 16236\n;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 65494\n;; QUESTION SECTION:\n;dnsrecord-geo.azure.hcpapps.net. IN A\n\n;; ANSWER SECTION:\ndnsrecord-geo.azure.hcpapps.net. 60 IN CNAME dnsrecord-geo-azure-hcpapps-net.trafficmanager.net.\ndnsrecord-geo-azure-hcpapps-net.trafficmanager.net. 60 IN CNAME dnsrecord-geo.eu.azure.hcpapps.net.\ndnsrecord-geo.eu.azure.hcpapps.net. 60 IN A 172.31.0.3\n\n;; Query time: 88 msec\n;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)\n;; WHEN: Tue May 30 15:05:07 IST 2023\n;; MSG SIZE rcvd: 168\n
"},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/google/google/","title":"Google DNS","text":""},{"location":"multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/google/google/#google","title":"Google","text":"https://console.cloud.google.com/net-services/dns/zones
Google supports Weighted (Weighted Round Robin) and Geolocation routing policies (https://cloud.google.com/dns/docs/zones/manage-routing-policies). Both of these can be configured directly on records in Google Cloud DNS, and no secondary Traffic Management resource is required.
Notes:
- Record sets are modelled as a single endpoint with routing policy embedded. This is a different approach to Route53 where each individual A/CNAME would have its own record entry.
- Weight must be an integer between 0 and 10000.
- There are no continent options for region, only finer-grained regions such as us-east1, europe-west1, etc.
- There appears to be no way to set a default region; Google just routes requests to the nearest supported region.
- The current approach used in AWS Route53 for geo routing will work in the same way on Google DNS. A single CNAME record with geo routing policy specifying multiple geo specific A record entries as targets.
- Geo and weighted routing can be combined, as with AWS Route53, allowing traffic with a region to be routed using weightings.
- The modelling of the records is slightly different from AWS, but the current DNSRecord spec could still work. The Google implementation of AddRecords will have to process the list of endpoints, grouping related endpoints in order to build up the required API request. In this case there would not be a 1:1 mapping between an endpoint in a DNSRecord and the DNS provider, but the DNSRecord contents would be kept consistent across all providers and compatibility with external-dns would be maintained.
Example request for Geo CNAME record:
POST https://dns.googleapis.com/dns/v1beta2/projects/it-cloud-gcp-rd-midd-san/managedZones/google-hcpapps-net/rrsets
{\n \"name\": \"dnsrecord-geo.google.hcpapps.net.\",\n \"routingPolicy\": {\n \"geo\": {\n \"item\": [\n {\n \"location\": \"us-east1\",\n \"rrdata\": [\n \"dnsrecord-geo.na.google.hcpapps.net.\"\n ]\n },\n {\n \"location\": \"europe-west1\",\n \"rrdata\": [\n \"dnsrecord-geo.eu.google.hcpapps.net.\"\n ]\n }\n ],\n \"enableFencing\": false\n }\n },\n \"ttl\": 60,\n \"type\": \"CNAME\"\n}\n
Example request for Weighted A record:
POST https://dns.googleapis.com/dns/v1beta2/projects/it-cloud-gcp-rd-midd-san/managedZones/google-hcpapps-net/rrsets
{\n \"name\": \"dnsrecord-geo.na.google.hcpapps.net.\",\n \"routingPolicy\": {\n \"wrr\": {\n \"item\": [\n {\n \"weight\": 60.0,\n \"rrdata\": [\n \"172.31.0.1\"\n ]\n },\n {\n \"weight\": 60.0,\n \"rrdata\": [\n \"172.31.0.2\"\n ]\n }\n ]\n }\n },\n \"ttl\": 60,\n \"type\": \"A\"\n}\n
Given the example DNSRecord here describing a record set for a geo location routing policy with four clusters, two in two regions (North America and Europe), the following resources are required.
Three DNSRecords, one CNAME (dnsrecord-geo.google.hcpapps.net) and two A records (dnsrecord-geo.na.google.hcpapps.net and dnsrecord-geo.eu.google.hcpapps.net).
dig dnsrecord-geo.google.hcpapps.net\n\n; <<>> DiG 9.18.12 <<>> dnsrecord-geo.google.hcpapps.net\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 22504\n;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 65494\n;; QUESTION SECTION:\n;dnsrecord-geo.google.hcpapps.net. IN A\n\n;; ANSWER SECTION:\ndnsrecord-geo.google.hcpapps.net. 60 IN CNAME dnsrecord-geo.eu.google.hcpapps.net.\ndnsrecord-geo.eu.google.hcpapps.net. 60 IN A 172.31.0.4\n\n;; Query time: 33 msec\n;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)\n;; WHEN: Tue May 30 15:05:25 IST 2023\n;; MSG SIZE rcvd: 108\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/","title":"Olm","text":""},{"location":"multicluster-gateway-controller/docs/versioning/olm/#how-to-create-a-mgc-olm-bundle-catalog-and-how-to-install-mgc-via-olm","title":"How to create a MGC OLM bundle, catalog and how to install MGC via OLM","text":" NOTE: You can supply different env vars to the following make commands these include:
* Version using the env var VERSION\n* Tag via the env var IMAGE_TAG for tags not following the semantic format.\n* Image registry via the env var REGISTRY\n* Registry org via the env var ORG\n\nFor example:\n
make bundle-build-push VERSION=2.0.1\nmake catalog-build-push IMAGE_TAG=asdf\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#creating-the-bundle","title":"Creating the bundle","text":" - Generate build and push the OLM bundle manifests for MGC, run the following make target:
make bundle-build-push\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#creating-the-catalog","title":"Creating the catalog","text":" - Build and push the catalog image
make catalog-build-push\n
"},{"location":"multicluster-gateway-controller/docs/versioning/olm/#installing-the-operator-via-olm-catalog","title":"Installing the operator via OLM catalog","text":" -
Create a namespace:
cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Namespace\nmetadata:\n name: multi-cluster-gateways-system\nEOF\n
-
Create a catalog source:
cat <<EOF | kubectl apply -f -\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n name: mgc-catalog\n namespace: olm\nspec:\n sourceType: grpc\n image: quay.io/kuadrant/multicluster-gateway-controller-catalog:v6.5.4\n grpcPodConfig:\n securityContextConfig: restricted\n displayName: mgc-catalog\n publisher: Red Hat\nEOF\n
Create a subscription:
cat <<EOF | kubectl apply -f -\napiVersion: operators.coreos.com/v1alpha1\nkind: Subscription\nmetadata:\n name: multicluster-gateway-controller\n namespace: multi-cluster-gateways-system\nspec:\n channel: alpha\n name: multicluster-gateway-controller\n source: mgc-catalog\n sourceNamespace: olm\n installPlanApproval: Automatic\nEOF\n
Create an operator group:
cat <<EOF | kubectl apply -f -\napiVersion: operators.coreos.com/v1\nkind: OperatorGroup\nmetadata:\n name: og-mgc\n namespace: multi-cluster-gateways-system\nEOF\n
For more information on each of these OLM resources please see the official docs
"},{"location":"architecture/docs/design/architectural-overview-v1/","title":"Kuadrant Architectural Overview","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#overview","title":"Overview","text":"Kuadrant provides connectivity, security and service protection capabilities in both a single and multi-cluster environment. It exposes these capabilities in the form of Kubernetes CRDs that implement the Gateway API concept of policy attachment. These policy APIs can target specific Gateway API resources such as Gateways
and HTTPRoutes
to extend their capabilities and configuration. They enable platform engineers to secure, protect and connect their infrastructure and allow application developers to self service and refine policies to their specific needs in order to protect exposed endpoints.
"},{"location":"architecture/docs/design/architectural-overview-v1/#key-architectural-areas","title":"Key Architectural Areas","text":" - Kuadrant architecture is defined and implemented with both control plane and data plane components.
- The control plane is where policies are exposed and expressed as Kubernetes APIs and reconciled by a policy controller.
- The data plane is where Kuadrant's \"policy enforcement\" components exist. These components are configured by the control plane and integrate either directly with the Gateway provider or via external integrations.
"},{"location":"architecture/docs/design/architectural-overview-v1/#10000m-architecture","title":"10000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"The control plane is a set of controllers and operators that are responsible for for installation and configuration of other components such as the data plane enforcement components and configuration of the Gateway to enable the data plane components to interact with incoming requests. The control plane also owns and reconciles the policy CRD APIs into more complex and specific configuration objects that the policy enforcement components consume in order to know the rules to apply to incoming requests or the configuration to apply to external integrations such as DNS and ACME providers.
"},{"location":"architecture/docs/design/architectural-overview-v1/#kuadrant-operator","title":"Kuadrant Operator","text":" - Installation and configuration of other control plane components
- Installation of data plane policy enforcement components via their respective control plane operators
- Configures the Gateway via WASM plugin and other APIs to leverage the data plane components for auth and rate limiting on incoming requests.
- Exposes
RateLimitPolicy
, AuthPolicy
, DNSPolicy
and TLSPolicy
and reconciles these into enforceable configuration for the data plane. - Exposes
Kuadrant
and reconciles this to configure and trigger installation of the required data plane components and other control plane components.
"},{"location":"architecture/docs/design/architectural-overview-v1/#limitador-operator","title":"Limitador Operator:","text":" - Installs and configures the Limitador data plane component based on the Limitador CR. Limits specified in the limitador CR are mountd via configmap into the limitador component.
"},{"location":"architecture/docs/design/architectural-overview-v1/#authorino-operator","title":"Authorino Operator:","text":" - Installs and configures the Authorino data plane component based on the Authorino CR.
"},{"location":"architecture/docs/design/architectural-overview-v1/#cert-manager","title":"Cert-Manager:","text":" - Manages TLS certificates for our components and for the Gateways. Consumes Certificate resources created by Kuadrant operator in response to the TLSPolicy.
"},{"location":"architecture/docs/design/architectural-overview-v1/#dns-operator","title":"DNS Operator","text":" - DNS operator consumes DNSRecord resources that are configured via the DNSPolicy api and applies them into the targeted cloud DNS provider AWS, Azure and Google DNS are our main targets
"},{"location":"architecture/docs/design/architectural-overview-v1/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"The data plane components sit in the request flow and are responsible for enforcing configuration defined by policy and providing service protection capabilities based on configuration managed and created by the control plane.
"},{"location":"architecture/docs/design/architectural-overview-v1/#limitador","title":"Limitador","text":" - Complies with the with Envoy rate limiting API to provide rate limiting to the gateway. Consumes limits from a configmap created based on the RateLimitPolicy API.
"},{"location":"architecture/docs/design/architectural-overview-v1/#authorino","title":"Authorino","text":" - Complies with the Envoy external auth API to provide auth integration to the gateway. It provides both Authn and Authz. Consumes AuthConfigs created by the kuadrant operator based on the defined
AuthPolicy
API.
"},{"location":"architecture/docs/design/architectural-overview-v1/#wasm-shim","title":"WASM Shim","text":" - Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request time enforcement of and rate limiting.
"},{"location":"architecture/docs/design/architectural-overview-v1/#single-cluster-layout","title":"Single Cluster Layout","text":"In a single cluster, you have the Kuadrant control plane and data plane sitting together. It is configured to integrate with Gateways on the same cluster and configure a DNS zone via a DNS provider secret (configured alongside a DNSPolicy). Storage of rate limit counters is possible but not required as they are not being shared.
"},{"location":"architecture/docs/design/architectural-overview-v1/#multi-cluster","title":"Multi-Cluster","text":"In the default multi-cluster setup. Each individual cluster has Kuadrant installed. Each of these clusters are unaware of the other. They are effectively operating as single clusters. The multi-cluster aspect is created by sharing access with the DNS zone, using a shared host across the clusters and leveraging shared counter storage. The zone is operated on independently by each of DNS operator on both clusters to form a single cohesive record set. More details on this can be found in the following RFC document: TODO add link. The rate limit counters can also be shared and used by different clusters in order to provide global rate limiting. This is achieved by connecting each instance of Limitador to a shared data store that uses the Redis protocol. There is another option available for achieving multi-cluster connectivity (see intgrations below) that requires the use of a \"hub\" cluster and integration with OCM (open cluster management).
Shown above is a multi-cluster, multi-ingress-gateway topology. This might be used to support a geographically distributed system, for example. However, it is also possible to leverage overlay networking tools such as Skupper that integrate at the Kubernetes service level to have a single gateway cluster that then integrates with multiple backends (on different clusters or in custom infrastructure).
"},{"location":"architecture/docs/design/architectural-overview-v1/#dependencies","title":"Dependencies","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#istio-required","title":"Istio: Required","text":" - Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities. Kuadrant configures Envoy via the Istio control plane in order to enforce the applied policies and register components such as Authorino and Limitador.
- Used by
RateLimitPolicy
and AuthPolicy
"},{"location":"architecture/docs/design/architectural-overview-v1/#gateway-api-required","title":"Gateway API: Required","text":" - New standard for Ingress from the Kubernetes community
- Gateway API is the core API that Kuadrant integrates with.
"},{"location":"architecture/docs/design/architectural-overview-v1/#integrations","title":"Integrations","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#open-cluster-manager-optional","title":"Open Cluster Manager: Optional","text":" - Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
While the default setup is to leverage a distributed configuration for DNS and rate limiting. There is also a component that offers experimental integration with Open Cluster Management.
In this setup, the OCM integration controller is installed into the HUB alongside the DNS Operator and the cert-manager. This integration allows you to define gateways in the Hub and distribute them to \"spoke\" clusters. The addresses of these gateways are gathered from the spokes and aggregated back to the hub. The Kuadrant operator and DNS operator then act on this information as though it were a single cluster gateway with multiple addresses. The DNS zone in the configured DNS provider is managed centrally by one DNS operator instance.
"},{"location":"architecture/docs/design/architectural-overview/","title":"Kuadrant Architectural Overview [Draft]","text":""},{"location":"architecture/docs/design/architectural-overview/#overview","title":"Overview","text":"It is important to note that Kuadrant is not in itself a gateway provider. Kuadrant provides a set of valuable policy APIs that enhance Gateway API via its defined policy attachment extension point. The policy APIs are reconciled by a set of policy controllers and enforced via integration at different points to configure, enhance and secure the application connectivity provided via Gateway API and the underlying gateway provider. These policy extensions are focused around areas such as DNS management supporting global load balancing and health checks, alongside service protection specific APIs such as rate limiting and auth. Kuadrant also integrates with Open Cluster Management as a multi-cluster control plane to enable defining and distributing Gateways across multiple clusters, providing load balancing and tls management for these distributed gateways. These integrations and features can be managed centrally in a declarative way from the Open Cluster Management Hub using Kubernetes resources.
"},{"location":"architecture/docs/design/architectural-overview/#key-architectural-areas","title":"Key Architectural Areas","text":" - The Kuadrant architecture is spread across a control plane and also a data plane. Kuadrant can work in both a single and multi-cluster context. Currently in order for all APIs to work in a single or multi-cluster context you need to have Open Cluster Management installed. While this may change in the future, this approach allows us to start with a single cluster and seamlessly scale as more clusters are added.
- The control plane is where policies are exposed and expressed as kubernetes APIs and reconciled by the Kuadrant policy controllers.
- The data plane is where Kuadrant's service protection components, configured by the control plane policies, are enforced within the gateway instance as part of the request flow.
"},{"location":"architecture/docs/design/architectural-overview/#1000m-architecture","title":"1000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"A control plane component is something responsible for accepting instruction via a CRD based API and ensuring that configuration is manifested into state that can be acted on.
"},{"location":"architecture/docs/design/architectural-overview/#kuadrant-operator","title":"Kuadrant Operator","text":" - Installation of data plane service protection components via their respective operators
- Exposes
RateLimitPolicy
and AuthPolicy
and is currently the policy controller for these APIs - Configures the Gateway to be able to leverage the data plane service protection components
"},{"location":"architecture/docs/design/architectural-overview/#multi-cluster-gateway-controller","title":"Multi-Cluster Gateway Controller","text":" - Exposes
DNSPolicy
and TLSPolicy
- Configures DNS providers (e.g AWS Route 53) and TLS providers
- Focused around use cases involving distributed gateways (for example across clouds or geographic regions)
- Integrates with Open Cluster Management as the multi-cluster management hub to distribute and observe gateway status based on the clusters they are deployed to. Works directly with Open Cluster Management APIs such as
PlacementDecision
and ManifestWork
.
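A minimal sketch of a TLSPolicy targeting a Gateway (names are illustrative; issuerRef points at a pre-existing cert-manager issuer):
apiVersion: kuadrant.io/v1alpha1\nkind: TLSPolicy\nmetadata:\n  name: prod-web\n  namespace: multi-cluster-gateways\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: prod-web\n  # cert-manager issuer assumed to exist in the hub cluster\n  issuerRef:\n    group: cert-manager.io\n    kind: ClusterIssuer\n    name: glbc-ca\n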
"},{"location":"architecture/docs/design/architectural-overview/#kuadrant-add-on-manager","title":"Kuadrant-add-on-manager","text":" - Sub component in the gateway controller repository
- Follows the add-on pattern from Open Cluster Management
- Responsible for configuring and installing Kuadrant into a target spoke cluster
"},{"location":"architecture/docs/design/architectural-overview/#limitador-operator","title":"Limitador Operator:","text":" - Installs and configures Limitador
"},{"location":"architecture/docs/design/architectural-overview/#authorino-operator","title":"Authorino Operator:","text":" - Installs and configures Authorino
"},{"location":"architecture/docs/design/architectural-overview/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"A data plane component sits in the request flow and is responsible for enforcing policy and providing service protection capabilities based on configuration managed and created by the control plane.
"},{"location":"architecture/docs/design/architectural-overview/#limitador","title":"Limitador","text":" - Complies with the with Envoy rate limiting API to provide rate limiting to the gateway
"},{"location":"architecture/docs/design/architectural-overview/#authorino","title":"Authorino","text":" - Complies with the Envoy external auth API to provide auth integration to the gateway
"},{"location":"architecture/docs/design/architectural-overview/#wasm-shim","title":"WASM Shim","text":" - Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request time enforcement of and rate limiting
"},{"location":"architecture/docs/design/architectural-overview/#dependencies-and-integrations","title":"Dependencies and integrations","text":"In order to provide its full suite of functionality, Kuadrant has several dependencies. Some of these are optional depending on the functionality needed.
"},{"location":"architecture/docs/design/architectural-overview/#cert-manager-required","title":"Cert-Manager: Required","text":" - Provides TLS integration
- Used by
TLSPolicy
and Authorino.
"},{"location":"architecture/docs/design/architectural-overview/#open-cluster-manager-required","title":"Open Cluster Manager: Required","text":" - Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
"},{"location":"architecture/docs/design/architectural-overview/#istio-required","title":"Istio: Required","text":" - Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities.
- Used by
RateLimitPolicy
and AuthPolicy
"},{"location":"architecture/docs/design/architectural-overview/#gateway-api-required","title":"Gateway API: Required","text":" - New standard for Ingress from the Kubernetes community
- Gateway API is the core API that Kuadrant integrates with.
"},{"location":"architecture/docs/design/architectural-overview/#thanosprometheusgrafana-optional","title":"Thanos/Prometheus/Grafana: Optional","text":" - Provides observability integration
- Rather than providing any Kuadrant specific observability tooling, we instead look to leverage existing tools and technologies to provide observability capabilities for ingress.
"},{"location":"architecture/docs/design/architectural-overview/#high-level-multi-cluster-architecture","title":"High Level Multi-Cluster Architecture","text":"Kuadrant has a multi-cluster gateway controller that is intended to run in a Open Cluster Management provided \"Hub\" cluster. This cluster is effectively a central management cluster where policy and gateways along with all that Open Cluster Management offers can be defined and distributed to the managed \"spoke\" clusters.
"},{"location":"architecture/docs/design/architectural-overview/#single-cluster","title":"Single cluster","text":"In a single cluster context, the overall architecture remains the same as above, the key difference is that the Hub and Spoke cluster are now a single cluster rather than multiple clusters. This is how we are initially supporting single cluster.
"},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-leverage-open-cluster-management","title":"How does Kuadrant leverage Open Cluster Management?","text":"Kuadrant deploys a multi-cluster gateway controller into the Open Cluster Management hub (a control plane that manages a set of \"spoke\" clusters where workloads are executed). This controller offers its own APIs but also integrates with hub CRD based APIs (such as the placement API) along with the Gateway API CRD based APIs in order to provide multi-cluster Gateway capabilities to the hub and distribute actual gateway instances to the spokes. See the Open Cluster Management docs for further details on the hub spoke architecture.
As part of installing Kuadrant, the Gateway API CRDs are also installed into the hub cluster and Kuadrant defines a standard Gateway API GatewayClass
resource that the multi-cluster gateway controller is the chosen controller for.
Once installed, an Open Cluster Management user can then (with the correct RBAC in place) define in the standard way a Gateway resource that inherits from the Kuadrant configured GatewayClass
in the hub. There is nothing unique about this Gateway definition, the difference is what it represents and how it is used. This Gateway is used to represent a \"multi-cluster\" distributed gateway. As such there are no pods running behind this Gateway instance in the hub cluster, instead it serves as a template that the Kuadrant multi-cluster gateway controller reconciles and distributes to targeted spoke clusters. It leverages the Open Cluster Management APIs to distribute these gateways (more info below) and aggregates the status information from each spoke cluster instance of this gateway back to this central definition, in doing this it can represent the status of the gateway across multiple clusters but also use that information to integrate with DNS providers etc.
"},{"location":"architecture/docs/design/architectural-overview/#gateway-deployment-and-distribution","title":"Gateway Deployment and Distribution","text":"In order for a multi-cluster gateway to be truly useful, it needs to be distributed or \"placed\" on a specific set of hub managed spoke clusters. Open Cluster Management is responsible for a set of placement and replication APIs. Kuadrant is aware of these APIs, and so when a given gateway is chosen to be placed on a set of managed clusters, Kuadrant multi-cluster gateway controller will ensure the right resources (ManifestWork
) are created in the correct namespaces in the hub. Open Cluster Management then is responsible for syncing these to the actual spoke cluster and reporting back the status of these resources to the Hub. A user would indicate which clusters they want a gateway placed on by using a Placement
and then labeling the gateway using the cluster.open-cluster-management.io/placement
label.
In order for the Gateway to be instantiated, we need to know what underlying gateway provider is being used on the spoke clusters. Admins can then set this provider in the hub via the GatewayClass params. In the hub, Kuadrant will then apply a transformation to the gateway to ensure when synced it references this spoke gateway provider (Istio for example).
It is the Open Cluster Management workagent that is responsible for syncing down and applying the resources into the managed spoke cluster. It is also responsible for syncing status information back to the hub. It is the multi-cluster gateway controller that is responsible for aggregating this status.
The status information reported back to the Hub is used by the multi-cluster gateway controller to know what LB hosts / IPAddresses to use for DNSRecords that it creates and manages.
More info on the Open Cluster Management hub and spoke architecture can be found here
"},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-integrate-with-gateway-providers","title":"How does Kuadrant integrate with Gateway Providers?","text":"Currently the Kuadrant data plane only integrates with an Istio based gateway provider:
- It registers Authorino with the
IstioOperator
as an auth provider so that Authorino can be used as an external auth provider. - It leverages an
EnvoyFilter
to register the rate limiting service as an upstream service. - Based on the Kuadrant
AuthPolicy
, it leverages Istio's AuthorizationPolicy
resource to configure when a request should trigger Authorino to be called for a given host, path and method etc. - It provides a WebAssembly (WASM) Plugin that conforms to the Proxy WASM ABI (application binary interface). This WASM Plugin is loaded into the underlying Envoy based gateway provider and configured via the Kuadrant Operator based on defined
RateLimitPolicy
resources. This binary is executed in response to an HTTP request being accepted by the gateway via the underlying Envoy instance that provides the proxy layer for the Gateway. This plugin is configured with the correct upstream rate limit service name and, when it sees a request, based on the provided configuration, it will trigger a call to the installed Limitador that provides the rate limit capabilities and either allow the request to continue or trigger a response to the client with a 429 (Too Many Requests) HTTP code.
"},{"location":"architecture/docs/design/architectural-overview/#data-flows","title":"Data Flows","text":"There are several different data flows when using Kuadrant.
"},{"location":"architecture/docs/design/architectural-overview/#control-plane-configuration-and-status-reporting","title":"Control plane configuration and status reporting","text":"The initial creation of these APIs (gateways, policies etc) is done by the relevant persona in the control plane just as they would any other k8s resource. We use the term cluster admin or gateway admin as the operations type persona configuring, and placing gateways. As shown above, in a multi-cluster configuration. API definitions are pulled from the Hub and \"manifested\" into the spokes. The Status of those synced resources are reported back to the Hub. The same happens for a single cluster, the only difference being the work agent hub controllers are all installed on one cluster.
"},{"location":"architecture/docs/design/architectural-overview/#third-party-enforcement-and-integration","title":"Third party enforcement and Integration","text":"In order to enforce the policy configuration, components in the control plane and data plane can reach out to configured 3rd parties such as cloud based DNS provider, TLS providers and Auth providers.
"},{"location":"architecture/docs/design/architectural-overview/#request-flow","title":"Request Flow","text":"Requests coming through the gateway instance can be sent to Limitador based on configuration of the WASM plugin installed into the Envoy based gateway provider or to Authorino based configuration provided by the Istio AuthorizationPolicy
. Each of these components have the capability to see the request and need to in order to make the required decision. Each of these components can also prevent the request from reaching its intended backend destination based on user configuration.
"},{"location":"architecture/docs/design/architectural-overview/#auth","title":"Auth","text":"As all of the APIs are CRDs, auth around creating these resources is handled in the standard way IE by the kubernetes cluster and RBAC. There is no relationship by default between the Auth features provided by Authorino to application developers and the auth requirements of the cluster API server.
For Auth between Spoke and Hub see Open Cluster Management docs
"},{"location":"architecture/docs/design/architectural-overview/#observability","title":"Observability","text":"Kuadrant doesn't provide any specific observability components, but rather provides a reference setup using well known and established components along with some useful dashboards to help observe key things around the Gateways. The focus of this setup, is in the context of a multi-cluster setup where Open Cluster Management is installed and gateways are being defined and distributed from that hub.
"},{"location":"architecture/docs/design/architectural-overview/#some-notes-on-future-direction","title":"Some notes on future direction","text":"This section is here to provide some insight into architectural changes that may be seen in the near future:
What is in this doc represents the architecture at the point of our MVP release. Below are some areas that we have identified as likely to change in the coming releases. As these changes happen, this doc will also evolve.
- We want to separate out the OCM integration into its own controller, so that policies can evolve without coupling to any one multi-cluster management solution
- We want to separate the policies into their own controller that is capable of supporting both single-cluster (without Open Cluster Management) and multi-cluster (with Open Cluster Management enabled) contexts, so that the barrier to entry is reduced for those starting with a single cluster
- We want to allow for an on-cluster DNS provider such as CoreDNS, so that we can provide an implementation that is disconnected from any cloud provider and supports more flexible DNS setups
- We will look to reduce our integration with Istio and want to provide integration with additional gateway providers such as Envoy Gateway
"},{"location":"architecture/docs/design/modular_installation/","title":"Kuadrant Proposal - Modular Installation","text":"Kuadrant is developing a set of loosely coupled functionalities built directly on top of Kubernetes. Kuadrant aims to allow customers to just install, use and understand those functionalities they need.
"},{"location":"architecture/docs/design/modular_installation/#problem-statement","title":"Problem Statement","text":"Currently, the installation tool of kuadrant, the kuadrantctl CLI, installs all or nothing. Installing more than the customer needs adds unneeded complexity and operational effort. For example, if a customer is looking for rate limiting and not interested in authentication functionality, then the customer should be able to just install and run that part of Kuadrant.
"},{"location":"architecture/docs/design/modular_installation/#high-level-goals","title":"High Level Goals","text":" - Install only required components. Operate only required components.
Reduce system complexity and operational effort to the minimum required. Components in this context refer to deployments and running instances.
- Expose only the activated functionalities
A user of a partial Kuadrant install should not be confronted with data in custom resources that has no meaning or is not accessible in their partial Kuadrant install. The design of the Kuadrant API should take this goal into account.
"},{"location":"architecture/docs/design/modular_installation/#proposed-solution","title":"Proposed Solution","text":"The kuadrant installation mechanism should offer modular installation to enable/disable loosely coupled pieces of kuadrant. Modular installation options should be feature oriented rather than deployment component oriented. Then, it is up to the installation tool to decide what components need to be deployed and how to configure it.
Each feature, or part of it, is eligible to be included or excluded when installing kuadrant.
Profiles can be defined to group sets of commonly required features. Naming the profiles allows the customer to easily express the wanted installation configuration. Furthermore, profiles can be used not only to group a set of features, but also to define deployment options.
- Minimal: Minimal installation required to run an API without any protection, analytics or API management. Default deployment option.
- AuthZ: Authentication and authorization mechanisms activated.
- RateLimit: Basic rate limit (only pre-auth rate limit) features.
- Full: Full featured Kuadrant installation.
A Kuadrant operator, together with a design of a Kuadrant CRD, is desired: not only for Kuadrant installation, but also for lifecycle management. Additionally, the kuadrantctl CLI tool can also be useful to either deploy Kuadrant components and manifests or just deploy the Kuadrant operator.
The Kuadrant control plane should be aware of the installed profile via env vars or command line params in the control plane running components. With that information, the control plane can decide to enable or disable CRD watching, label and annotation monitoring, and ultimately reject any configuration object that relies on disabled functionality. The least a customer can expect from Kuadrant is for it to be consistent and reject any functionality request that it cannot provide.
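Purely as an illustrative sketch \u2013 neither the CRD nor the field names are fixed by this proposal \u2013 such a custom resource could look like:
apiVersion: kuadrant.io/v1beta1 # hypothetical\nkind: Kuadrant\nmetadata:\n  name: kuadrant\n  namespace: kuadrant-system\nspec:\n  profile: RateLimit # hypothetical field: install and operate only the rate limiting feature set\n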
"},{"location":"architecture/rfcs/0001-rlp-v2/","title":"RateLimitPolicy API v2","text":" - Feature Name:
rlp-v2
- Start Date: 2023-02-02
- RFC PR: Kuadrant/architecture#12
- Issue tracking: Kuadrant/architecture#13
"},{"location":"architecture/rfcs/0001-rlp-v2/#summary","title":"Summary","text":"Proposal of new API for the Kuadrant's RateLimitPolicy
(RLP) CRD, for improved UX.
"},{"location":"architecture/rfcs/0001-rlp-v2/#motivation","title":"Motivation","text":"The RateLimitPolicy
API (v1beta1), particularly its RateLimit
type used in ratelimitpolicy.spec.rateLimits
, designed in part to fit the underlying implementation based on the Envoy Rate limit filter, has proven to be complex, as well as somewhat limiting for extending the API to other platforms and/or for supporting use cases not contemplated in the original design.
Users of the RateLimitPolicy
will immediately recognize elements of Envoy's Rate limit API in the definitions of the RateLimit
type, with almost 1:1 correspondence between the Configuration
type and its counterpart in the Envoy configuration. Although compatibility between those continues to be desired, leaking such implementation details into the API can be avoided, providing a better abstraction for activators (\"matchers\") and payload (\"descriptors\"), stated by users in a seamless way.
Furthermore, the Limit
type \u2013 used as well in the RLP's RateLimit
type \u2013 presently implies a logical relationship between its inner concepts \u2013 i.e. conditions and variables on one side, and limits themselves on the other \u2013 that could otherwise be shaped in a different manner, to provide a clearer understanding of these concepts to the user and avoid repetition; i.e., one limit definition contains multiple rate limits, and not the other way around.
"},{"location":"architecture/rfcs/0001-rlp-v2/#goals","title":"Goals","text":" - Decouple the API from the underlying implementation - i.e. provide a more generic and more user-friendly abstraction
- Prepare the API for upcoming changes in the Gateway API Policy Attachment specification
- Improve consistency of the API with respect to Kuadrant's AuthPolicy CRD - i.e. same language, similar UX
"},{"location":"architecture/rfcs/0001-rlp-v2/#current-wip-to-consider","title":"Current WIP to consider","text":" - Policy attachment update (kubernetes-sigs/gateway-api#1565)
- No merging of policies (kuadrant/architecture#10)
- A single Policy scoped to HTTPRoutes and HTTPRouteRule (kuadrant/architecture#4) - future
- Implement
skip_if_absent
for the RequestHeaders action (kuadrant/wasm-shim#29)
"},{"location":"architecture/rfcs/0001-rlp-v2/#highlights","title":"Highlights","text":" spec.rateLimits[]
replaced with spec.limits{<limit-name>: <limit-definition>}
spec.rateLimits.limits
replaced with spec.limits.<limit-name>.rates
spec.rateLimits.limits.maxValue
replaced with spec.limits.<limit-name>.rates.limit
spec.rateLimits.limits.seconds
replaced with spec.limits.<limit-name>.rates.duration
+ spec.limits.<limit-name>.rates.unit
spec.rateLimits.limits.conditions
replaced with spec.limits.<limit-name>.when
, structured field based on well-known selectors, mainly for expressing conditions not related to the HTTP route (although not exclusively) spec.rateLimits.limits.variables
replaced with spec.limits.<limit-name>.counters
, based on well-known selectors spec.rateLimits.rules
replaced with spec.limits.<limit-name>.routeSelectors
, for selecting (or \"sub-targeting\") HTTPRouteRules that trigger the limit - new matcher
spec.limits.<limit-name>.routeSelectors.hostnames[]
spec.rateLimits.configurations
removed \u2013 descriptor actions configuration (previously spec.rateLimits.configurations.actions
) generated from spec.limits.<limit-name>.when.selector
\u222a spec.limits.<limit-name>.counters
and unique identifier of the limit (associated with spec.limits.<limit-name>.routeSelectors
) - Limitador conditions composed of \"soft\"
spec.limits.<limit-name>.when
conditions + a \"hard\" condition that binds the limit to its trigger HTTPRouteRules
For detailed differences between current and new RLP API, see Comparison to current RateLimitPolicy.
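To make the renames above concrete, here is a minimal sketch of the same limit expressed in both shapes (abbreviated and illustrative, not a full spec):
# v1beta1 (current): one rate limit per Limit object\nspec:\n  rateLimits:\n  - limits:\n    - maxValue: 5\n      seconds: 60\n      conditions: [\"...\"] # parsed Limitador condition strings\n      variables: [\"username\"]\n\n# v2 (proposed): named limit definition with one or more rates\nspec:\n  limits:\n    my-limit:\n      rates:\n      - limit: 5\n        duration: 1\n        unit: minute\n      when:\n      - selector: auth.identity.group\n        operator: neq\n        value: admin\n      counters:\n      - auth.identity.username\n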
"},{"location":"architecture/rfcs/0001-rlp-v2/#guide-level-explanation","title":"Guide-level explanation","text":""},{"location":"architecture/rfcs/0001-rlp-v2/#examples-of-rlps-based-on-the-new-api","title":"Examples of RLPs based on the new API","text":"Given the following network resources:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: Gateway\nmetadata:\n name: istio-ingressgateway\n namespace: istio-system\nspec:\n gatewayClassName: istio\n listeners:\n\n - hostname:\n - \"*.acme.com\"\n---\napiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
The following are examples of RLPs targeting the route and the gateway. Each example is independent from the other.
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-1-minimal-example-network-resource-targeted-entirely-without-filtering-unconditional-and-unqualified-rate-limiting","title":"Example 1. Minimal example - network resource targeted entirely without filtering, unconditional and unqualified rate limiting","text":"In this example, all traffic to *.toystore.acme.com
will be limited to 5rps, regardless of any other attribute of the HTTP request (method, path, headers, etc), without any extra \"soft\" conditions (conditions non-related to the HTTP route), across all consumers of the API (unqualified rate limiting).
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-infra-rl\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n base: # user-defined name of the limit definition - future use for handling hierarchical policy attachment\n\n - rates: # at least one rate limit required\n - limit: 5\n unit: second\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-infra-rl/base\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-infra-rl/base == \"1\"\n max_value: 5\n seconds: 1\n namespace: TBD\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-2-targeting-specific-route-rules-with-counter-qualifiers-multiple-rates-per-limit-definition-and-soft-conditions","title":"Example 2. Targeting specific route rules, with counter qualifiers, multiple rates per limit definition and \"soft\" conditions","text":"In this example, a distinct limit will be associated (\"bound\") to each individual HTTPRouteRule of the targeted HTTPRoute, by using the routeSelectors
field for selecting (or \"sub-targeting\") the HTTPRouteRule.
The following limit definitions will be bound to each HTTPRouteRule:
/toys*
\u2192 50rpm, enforced per username (counter qualifier) and only in case the user is not an admin (\"soft\" condition). /assets/*
\u2192 5rpm / 100rp12h
Each set of trigger matches in the RLP will be matched to all HTTPRouteRules whose HTTPRouteMatches is a superset of the set of trigger matches in the RLP. For every HTTPRouteRule matched, the HTTPRouteRule will be bound to the corresponding limit definition that specifies that trigger. In case no HTTPRouteRule is found containing at least one HTTPRouteMatch that is identical to some set of matching rules of a particular limit definition, the limit definition is considered invalid and reported as such in the status of RLP.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-endpoint\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toys:\n rates:\n\n - limit: 50\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n\n assets:\n rates:\n\n - limit: 5\n duration: 1\n unit: minute\n - limit: 100\n duration: 12\n unit: hour\n routeSelectors:\n - matches: # matches the 2nd HTTPRouteRule (i.e. /assets/*)\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/toys\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.group\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"group\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n- rules:\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/assets\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-endpoint/toys == \"1\"\n - auth.identity.group != \"admin\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 60\n namespace: kuadrant\n- conditions:\n - toystore/toystore-per-endpoint/assets == \"1\"\n max_value: 5\n seconds: 60\n namespace: kuadrant\n- conditions:\n - toystore/toystore-per-endpoint/assets == \"1\"\n max_value: 100\n seconds: 43200 # 12 hours\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-3-targeting-a-subset-of-an-httprouterule-httproutematch-missing","title":"Example 3. Targeting a subset of an HTTPRouteRule - HTTPRouteMatch missing","text":"Consider a 150rps rate limit set on requests to GET /toys/special
. This specific application endpoint is covered by the first HTTPRouteRule in the HTTPRoute (as a subset of GET
or POST
to any path that starts with /toys
). However, to avoid binding limits to HTTPRouteRules that are more permissive than the actual intended scope of the limit, the RateLimitPolicy controller requires trigger matches to find identical matching rules explicitly defined amongst the sets of HTTPRouteMatches of the HTTPRouteRules potentially targeted.
As a consequence, by simply defining a trigger match for GET /toys/special
in the RLP, the GET|POST /toys*
HTTPRouteRule will NOT be bound to the limit definition. In order to ensure the limit definition is properly bound to a routing rule that strictly covers the GET /toys/special
application endpoint, first the user has to modify the spec of the HTTPRoute by adding an explicit HTTPRouteRule for this case:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n - matches: # new (more specific) HTTPRouteRule added\n - path:\n type: Exact\n value: \"/toys/special\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\n
After that, the RLP can target the new HTTPRouteRule strictly:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-special-toys\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n specialToys:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the new HTTPRouteRule (i.e. GET /toys/special)\n - path:\n type: Exact\n value: \"/toys/special\"\n method: GET\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys/special\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-special-toys/specialToys\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-special-toys/specialToys == \"1\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-4-targeting-a-subset-of-an-httprouterule-httproutematch-found","title":"Example 4. Targeting a subset of an HTTPRouteRule - HTTPRouteMatch found","text":"This example is similar to Example 3. Consider the use case of setting a 150rpm rate limit on requests to GET /toys*
.
The targeted application endpoint is covered by the first HTTPRouteRule in the HTTPRoute (as a subset of GET
or POST
to any path that starts with /toys
). However, unlike in the previous example where, at first, no HTTPRouteRule included an explicit HTTPRouteMatch for GET /toys/special
, in this example the HTTPRouteMatch for the targeted application endpoint GET /toys*
does exist explicitly in one of the HTTPRouteRules, thus the RateLimitPolicy controller would find no problem to bind the limit definition to the HTTPRouteRule. That would nonetheless cause a unexpected behavior of the limit triggered not strictly for GET /toys*
, but also for POST /toys*
.
To avoid extending the scope of the limit beyond desired, with no extra \"soft\" conditions, again the user must modify the spec of the HTTPRoute, so an exclusive HTTPRouteRule exists for the GET /toys*
application endpoint:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n rules:\n - matches: # first HTTPRouteRule split into two \u2013 one for GET /toys*, other for POST /toys*\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
The RLP can then target the new HTTPRouteRule strictly:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toy-readers\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toyReaders:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the new more specific HTTPRouteRule (i.e. GET /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toy-readers/toyReaders\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toy-readers/toyReaders == \"1\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-5-one-limit-triggered-by-multiple-httprouterules","title":"Example 5. One limit triggered by multiple HTTPRouteRules","text":"In this example, both HTTPRouteRules, i.e. GET|POST /toys*
and /assets/*
, are targeted by the same limit of 50rpm per username.
Because the HTTPRoute has no other rule, this is technically equivalent to targeting the entire HTTPRoute and therefore similar to Example 1. However, if the HTTPRoute had other rules or got other rules added afterwards, this would ensure the limit applies only to the two original route rules.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-user\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toysOrAssetsPerUsername:\n rates:\n\n - limit: 50\n duration: 1\n unit: minute\n counters:\n - auth.identity.username\n routeSelectors:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-user/toysOrAssetsPerUsername\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-user/toysOrAssetsPerUsername == \"1\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 60\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-6-multiple-limit-definitions-targeting-the-same-httprouterule","title":"Example 6. Multiple limit definitions targeting the same HTTPRouteRule","text":"In case multiple limit definitions target a same HTTPRouteRule, all those limit definitions will be bound to the HTTPRouteRule. No limit \"shadowing\" will be be enforced by the RLP controller. Due to how things work as of today in Limitador nonetheless (i.e. the rule of the most restrictive limit wins), in some cases, across multiple limits triggered, one limit ends up \"shadowing\" others, depending on further qualification of the counters and the actual RL values.
E.g., the following RLP intends to set 50rps per username on GET /toys*
, and 100rps on POST /toys*
or /assets/*
:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-endpoint\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n readToys:\n rates:\n\n - limit: 50\n unit: second\n counters:\n - auth.identity.username\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n\n postToysOrAssets:\n rates:\n\n - limit: 100\n unit: second\n routeSelectors:\n - matches: # matches the 1st HTTPRouteRule (i.e. GET or POST to /toys*)\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n - matches: # matches the 2nd HTTPRouteRule (i.e. /assets/*)\n - path:\n type: PathPrefix\n value: \"/assets/\"\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/readToys\"\n descriptor_value: \"1\"\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/readToys\"\n descriptor_value: \"1\"\n - generic_key:\n descriptor_key: \"toystore/toystore-per-endpoint/postToysOrAssets\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions: # actually applies to GET|POST /toys*\n - toystore/toystore-per-endpoint/readToys == \"1\"\n variables:\n - auth.identity.username\n max_value: 50\n seconds: 1\n namespace: kuadrant\n- conditions: # actually applies to GET|POST /toys* and /assets/*\n - toystore/toystore-per-endpoint/postToysOrAssets == \"1\"\n max_value: 100\n seconds: 1\n namespace: kuadrant\n
This example was only written in this way to highlight that it is possible that multiple limit definitions select a same HTTPRouteRule. To avoid over-limiting between GET|POST /toys*
and thus ensure the originally intended limit definitions for each of these routes apply, the HTTPRouteRule should be split into two, as done in Example 4.
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-7-limits-triggered-for-specific-hostnames","title":"Example 7. Limits triggered for specific hostnames","text":"In the previous examples, the limit definitions and therefore the counters were set indistinctly for all hostnames \u2013 i.e. no matter if the request is sent to games.toystore.acme.com
or dolls.toystore.acme.com
, the same counters are expected to be affected. In this example on the other hand, a 1000rpd rate limit is set for requests to /assets/*
only when the hostname matches games.toystore.acme.com
.
First, the user needs to edit the HTTPRoute to make the targeted hostname games.toystore.acme.com
explicit:
apiVersion: gateway.networking.k8s.io/v1alpha2\nkind: HTTPRoute\nmetadata:\n name: toystore\n namespace: toystore\nspec:\n parentRefs:\n\n - name: istio-ingressgateway\n namespace: istio-system\n hostnames:\n - \"*.toystore.acme.com\"\n - games.toystore.acme.com # new (more specific) hostname added\n rules:\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n backendRefs:\n - name: toystore\n port: 80\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n backendRefs:\n - name: toystore\n port: 80\n filters:\n - type: ResponseHeaderModifier\n responseHeaderModifier:\n set:\n - name: Cache-Control\n value: \"max-age=31536000, immutable\"\n
After that, the RLP can target specifically the newly added hostname:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-per-hostname\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n games:\n rates:\n\n - limit: 1000\n unit: day\n routeSelectors:\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n hostnames:\n - games.toystore.acme.com\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/assets/*\"]\n hosts: [\"games.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-per-hostname/games\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - toystore/toystore-per-hostname/games == \"1\"\n max_value: 1000\n seconds: 86400 # 1 day\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#example-8-targeting-the-gateway","title":"Example 8. Targeting the Gateway","text":"Note: Additional meaning and context may be given to this use case in the future, when discussing defaults and overrides.
Targeting a Gateway is a shortcut to targeting all individual HTTPRoutes referencing the gateway as parent. This differs from Example 1 nonetheless because, by targeting the gateway rather than an individual HTTPRoute, the RLP applies automatically to all HTTPRoutes pointing to the gateway, including routes created before and after the creation of the RLP. Moreover, all those routes will share the same limit counters specified in the RLP.
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: gw-rl\n namespace: istio-ingressgateway\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: Gateway\n name: istio-ingressgateway\n limits:\n base:\n\n - rates:\n - limit: 5\n unit: second\n
How is this RLP implemented under the hood? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/assets/*\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"istio-system/gw-rl/base\"\n descriptor_value: \"1\"\n
limits:\n\n- conditions:\n - istio-system/gw-rl/base == \"1\"\n max_value: 5\n seconds: 1\n namespace: TBD\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#comparison-to-current-ratelimitpolicy","title":"Comparison to current RateLimitPolicy","text":"Current New Reason 1:1 relation between Limit (the object) and the actual Rate limit (the value) (spec.rateLimits.limits
) Rate limit becomes a detail of Limit where each limit may define one or more rates (1:N) (spec.limits.<limit-name>.rates
) - It allows to reuse
when
conditions and counters
for groups of rate limits
Parsed spec.rateLimits.limits.conditions
field, directly exposing the Limitador's API Structured spec.limits.<limit-name>.when
condition field composed of 3 well-defined properties: selector
, operator
and value
- Feels more K8s-native
- Consistent with github.com/kuadrant/authorino/api/v1beta1#JSONPatternExpression
- No need for a parser (only if implemented by Limitador)
spec.rateLimits.configurations
as a list of \"variables assignments\" and direct exposure of Envoy's RL descriptor actions API Descriptor actions composed from selectors used in the limit definitions (spec.limits.<limit-name>.when.selector
and spec.limits.<limit-name>.counters
) plus a fixed identifier of the route rules (spec.limits.<limit-name>.routeSelectors
) - Abstract the Envoy-specific concepts of \"actions\" and \"descriptors\"
- No risk of mismatching descriptors keys between \"actions\" and actual usage in the limits
- No user-defined generic descriptors (e.g. \"limited = 1\")
- Source value of the selectors defined from an implicit \"context\" data structure
Key-value descriptors Structured descriptors referring to a contextual well-known data structure - Consistent with Authorino's Authorization JSON (#context)
Limitador conditions independent from the route rules Artificial Limitador condition injected to bind routes and corresponding limits - Ensure the limit is enforced only for corresponding selected HTTPRouteRules
translate(spec.rateLimits.rules) \u2282 httproute.spec.rules
spec.limits.<limit-name>.routeSelectors.matches \u2286 httproute.spec.rules.matches
- HTTPRouteRule selector (via HTTPRouteMatch subset)
- Gateway API language
- Preparation for inherited policies and defaults & overrides
spec.rateLimits.limits.seconds
spec.limits.<limit-name>.rates.duration
and spec.limits.<limit-name>.rates.unit
- Support for more units beyond seconds
duration: 1
by default
spec.rateLimits.limits.variables
spec.limits.<limit-name>.counters
- Improved (more specific) naming
spec.rateLimits.limits.maxValue
spec.limits.<limit-name>.rates.limit
- Improved (more generic) naming
"},{"location":"architecture/rfcs/0001-rlp-v2/#reference-level-explanation","title":"Reference-level explanation","text":"By completely dropping out the configurations
field from the RLP, composing the RL descriptor actions is now done based essentially on the selectors listed in the when
conditions and the counters
, plus an artificial condition used to bind the HTTPRouteRules to the corresponding limits to trigger in Limitador.
The descriptor actions composed from the selectors in the \"soft\" when
conditions and counter qualifiers originate from the direct references these selectors make to paths within a well-known data structure that stores information about the context (HTTP request and ext-authz filter). These selectors in \"soft\" when
conditions and counter qualifiers are thereby called well-known selectors.
Other descriptor actions might be composed by the RLP controller to define additional RL conditions to bind HTTPRouteRules and corresponding limits.
"},{"location":"architecture/rfcs/0001-rlp-v2/#well-known-selectors","title":"Well-known selectors","text":"Each selector used in a when
condition or counter qualifier is a direct reference to a path within a well-known data structure that stores information about the context
(L4 and L7 data of the original request handled by the proxy), as well as auth
data (dynamic metadata occasionally exported by the external authorization filter and injected by the proxy into the rate-limit filter).
The well-known data structure for building RL descriptor actions resembles Authorino's \"Authorization JSON\", whose context
component consists of Envoy's AttributeContext
type of the external authorization API (marshalled as JSON). Compared to the more generic RateLimitRequest
struct, the AttributeContext
provides a more structured and arguibly more intuitive relation between the data sources for the RL descriptors actions and their corresponding key names through which the values are referred within the RLP, in a context of predominantly serving for HTTP applications.
To keep compatibility with the Envoy Rate Limit API, the well-known data structure can optionally be extended with the RateLimitRequest
, thus resulting in the following final structure.
context: # Envoy's Ext-Authz `CheckRequest.AttributeContext` type\n source:\n address: \u2026\n service: \u2026\n \u2026\n destination:\n address: \u2026\n service: \u2026\n \u2026\n request:\n http:\n host: \u2026\n path: \u2026\n method: \u2026\n headers: {\u2026}\n\nauth: # Dynamic metadata exported by the external authorization service\n\nratelimit: # Envoy's Rate Limit `RateLimitRequest` type\n domain: \u2026 # generated by the Kuadrant controller\n descriptors: {\u2026} # descriptors configured by the user directly in the proxy (not generated by the Kuadrant controller, if allowed)\n hitsAddend: \u2026 # only in case we want to allow users to refer to this value in a policy\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#mechanics-of-generating-rl-descriptor-actions","title":"Mechanics of generating RL descriptor actions","text":"From the perspective of a user who writes a RLP, the selectors used in then when
and counters
fields are paths to the well-known data structure (see Well-known selectors). While desiging a policy, the user intuitively pictures the well-known data structure and states each limit definition having in mind the possible values assumed by each of those paths in the data plane. For example,
The user story:
Each distinct user (auth.identity.username
) can send no more than 1rps to the same HTTP path (context.request.http.path
).
...materializes as the following RLP:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n dolls:\n rates:\n\n - limit: 1\n unit: second\n counters:\n - auth.identity.username\n - context.request.http.path\n
The following selectors are to be interpreted by the RLP controller:
auth.identity.username
context.request.http.path
The RLP controller uses a map to translate each selector into its corresponding descriptor action. (Roughly described:)
context.source.address \u2192 source_cluster(...) # TBC\ncontext.source.service \u2192 source_cluster(...) # TBC\ncontext.destination... \u2192 destination_cluster(...)\ncontext.destination... \u2192 destination_cluster(...)\ncontext.request.http.<X> \u2192 request_headers(header_name: \":<X>\")\ncontext.request... \u2192 ...\nauth.<X> \u2192 metadata(key: \"envoy.filters.http.ext_authz\", path: <X>)\nratelimit.domain \u2192 <hostname>\n
...to yield effectively:
rate_limits:\n\n- actions:\n - metadata:\n descriptor_key: \"auth.identity.username\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"username\"\n - request_headers:\n descriptor_key: \"context.request.http.path\"\n header_name: \":path\"\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#artificial-limitador-condition-for-routeselectors","title":"Artificial Limitador condition for routeSelectors
","text":"For each limit definition that explicitly or implicitly defines a routeSelectors
field, the RLP controller will generate an artificial Limitador condition that ensures that the limit applies only when the filterred rules are honoured when serving the request. This can be implemented with a 2-step procedure:
- generate an unique identifier of the limit - i.e.
<policy-namespace>/<policy-name>/<limit-name>
- associate a
generic_key
type descriptor action with each HTTPRouteRule
targeted by the limit \u2013 i.e. { descriptor_key: <unique identifier of the limit>, descriptor_value: \"1\" }
.
For example, given the following RLP:
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-non-admin-users\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n toys:\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: POST\n rates:\n - limit: 50\n duration: 1\n unit: minute\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n\n assets:\n routeSelectors:\n\n - matches:\n - path:\n type: PathPrefix\n value: \"/assets/\"\n rates:\n - limit: 5\n duration: 1\n unit: minute\n when:\n - selector: auth.identity.group\n operator: neq\n value: admin\n
Apart from the following descriptor action associated with both routes:
- metadata:\n descriptor_key: \"auth.identity.group\"\n metadata_key:\n key: \"envoy.filters.http.ext_authz\"\n path:\n - segment:\n key: \"identity\"\n - segment:\n key: \"group\"\n
...and its corresponding Limitador condition:
auth.identity.group != \"admin\"\n
The following additional artificial descriptor actions will be generated:
# associated with route rule GET|POST /toys*\n\n- generic_key:\n descriptor_key: \"toystore/toystore-non-admin-users/toys\"\n descriptor_value: \"1\"\n\n# associated with route rule /assets/*\n\n- generic_key:\n descriptor_key: \"toystore/toystore-non-admin-users/assets\"\n descriptor_value: \"1\"\n
...and their corresponding Limitador conditions.
In the end, the following Limitador configuration is yielded:
- conditions:\n - toystore/toystore-non-admin-users/toys == \"1\"\n - auth.identity.group != \"admin\"\n max_value: 50\n seconds: 60\n namespace: kuadrant\n\n\n- conditions:\n - toystore/toystore-non-admin-users/assets == \"1\"\n - auth.identity.group != \"admin\"\n max_value: 5\n seconds: 60\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#support-in-wasm-shim-and-envoy-rl-api","title":"Support in wasm shim and Envoy RL API","text":"This proposal tries to keep compatibility with the Envoy API for rate limit and does not introduce any new requirement that otherwise would require the use of wasm shim to be implemented.
In the case of implementation of this proposal in the wasm shim, all types of matchers supported by the HTTPRouteMatch type of Gateway API must be also supported in the rate_limit_policies.gateway_actions.rules
field of the wasm plugin configuration. These include matchers based on path (prefix, exact), headers, query string parameters and method.
"},{"location":"architecture/rfcs/0001-rlp-v2/#drawbacks","title":"Drawbacks","text":"HTTPRoute editing occasionally required Need to duplicate rules that don't explicitly include a matcher wanted for the policy, so that matcher can be added as a special case for each of those rules.
Risk of over-targeting Some HTTPRouteRules might need to be split into more specific ones so a limit definition is not bound to beyond intended (e.g. target method: GET
when the route matches method: POST|GET
).
Prone to consistency issues Typos and updates to the HTTPRoute can easily cause a mismatch and invalidate a RLP.
Two types of conditions \u2013 routeSelectors
and when
conditions Although with different meanings (evaluates in the gateway vs. evaluated in Limitador) and meant for expressing different types of rules (HTTPRouteRule selectors vs. \"soft\" conditions based on attributes not related to the HTTP request), users might still perceive these as two ways of expressing conditions and find difficult to understand at first that \"soft\" conditions do not accept expressions related to attributes of the HTTP request.
"},{"location":"architecture/rfcs/0001-rlp-v2/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0001-rlp-v2/#targeting-full-httprouterules","title":"Targeting full HTTPRouteRules","text":"Requiring users to specify full HTTPRouteRule matches in the RLP (as opposed to any subset of HTTPRoureMatches of targeted HTTPRouteRules \u2013 current proposal) contains some of the same drawbacks of this proposal, such as HTTPRoute editing occasionally required and prone to consistency issues. If, on one hand, it eliminates the risk of over-targeting, on the other hand, it does it at the cost of requiring excessively verbose policies written by the users, to the point of sometimes expecting user to have to specify trigger matching rules that are significantly more than what's originally and strictly intended.
E.g.:
On a HTTPRoute that contains the following HTTPRouteRules (simplified representation):
{ header: x-canary=true } \u2192 backend-canary\n{ * } \u2192 backend-rest\n
Where the user wants to define a RLP that targets { method: POST }
. First, the user needs to edit the HTTPRoute and duplicate the HTTPRouteRules:
{ header: x-canary=true, method: POST } \u2192 backend-canary\n{ header: x-canary=true } \u2192 backend-canary\n{ method: POST } \u2192 backend-rest\n{ * } \u2192 backend-rest\n
Then, user needs to include the following trigger in the RLP so only full HTTPRouteRules are specified:
{ header: x-canary=true, method: POST }\n{ method: POST }\n
The first matching rule of the trigger (i.e. { header: x-canary=true, method: POST }
) is beoynd the original user intent of targeting simply { method: POST }
.
This issue can be even more concerning in the case of targeting gateways with multiple child HTTPRoutes. All the HTTPRoutes would have to be fixed and the HTTPRouteRules that cover for all the cases in all HTTPRoutes listed in the policy targeting the gateway.
"},{"location":"architecture/rfcs/0001-rlp-v2/#all-limit-definitions-apply-vs-limit-shadowing","title":"All limit definitions apply vs. Limit \"shadowing\"","text":"The proposed binding between limit definition and HTTPRouteRules that trigger the limits was thought so multiple limit definitions can be bound to a same HTTPRouteRule that triggers those limits in Limitador. That means that no limit definition will \"shadow\" another at the level of the RLP controller, i.e. the RLP controller will honour the intended binding according to the selectors specified in the policy.
Due to how things work as of today in Limitador nonetheless, i.e., the rule of the most restrictive limit wins, and because all limit definitions triggered by a given shared HTTPRouteRule, it might be the case that, across multiple limits triggered, one limit ends up \"shadowing\" other limits. However, that is by implementation of Limitador and therefore beyond the scope of the API.
An alternative to the approach of allowing all limit definitions to be bound to a same selected HTTPRouteRules would be enforcing that, amongst multiple limit definitions targeting a same HTTPRouteRule, only the first of those limits definitions is bound to the HTTPRouteRule. This alternative approach effectively would cause the first limit to \"shadow\" any other on that particular HTTPRouteRule, as by implementation of the RLP controller (i.e., at API level).
While the first approach causes an artificial Limitador condition of the form <policy-ns>/<policy-name>/<limit-name> == \"1\"
, the alternative approach (\"limit shadowing\") could be implemented by generating a descriptor of the following form instead: ratelimit.binding == \"<policy-ns>/<policy-name>/<limit-name>\"
.
The downside of allowing multiple bindings to the same HTTPRouteRule is that all limits apply in Limitador, thus making status report frequently harder. The most restritive rate limit strategy implemented by Limitador might not be obvious to users who set multiple limit definitions and will require additional information reported back to the user about the actual status of the limit definitions stated in a RLP. On the other hand, it allows enables use cases of different limit definitions that vary on the counter qualifiers, additional \"soft\" conditions, or actual rate limit values to be triggered by a same HTTPRouteRule.
"},{"location":"architecture/rfcs/0001-rlp-v2/#writing-soft-when-conditions-based-on-attributes-of-the-http-request","title":"Writing \"soft\" when
conditions based on attributes of the HTTP request","text":"As a first step, users will not be able to write \"soft\" when
conditions to selective apply rate limit definitions based on attributes of the HTTP request that otherwise could be specified using the routeSelectors
field of the RLP instead.
On one hand, using when
conditions for route filtering would make it easy to define limits when the HTTPRoute cannot be modified to include the special rule. On the other hand, users would miss information in the status. An HTTPRouteRule for GET|POST /toys*
, for example, that is targeted with an additional \"soft\" when
condition that specifies that the method must be equal to GET
and the path exactly equal to /toys/special
(see Example 3) would be reported as rate limited with extra details that this is in fact only for GET /toys/special
. For small deployments, this might be considered acceptable; however it would easily explode to unmanageable number of cases for deployments with only a few limit definitions and HTTPRouteRules.
Moreover, by not specifying a more strict HTTPRouteRule for GET /toys/special
, the RLP controller would bind the limit definition to other rules that would cause the rate limit filter to invoke the rate limit service (Limitador) for cases other than strictly GET /toys/special
. Even if the rate limits would still be ensured to apply in Limitador only for GET /toys/special
(due to the presence of a hypothetical \"soft\" when
condition), an extra no-op hop to the rate limit service would happen. This is avoided with the current imposed limitation.
Example of \"soft\" when
conditions for rate limit based on attributes of the HTTP request (NOT SUPPORTED):
apiVersion: kuadrant.io/v2beta1\nkind: RateLimitPolicy\nmetadata:\n name: toystore-special-toys\n namespace: toystore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: toystore\n limits:\n specialToys:\n rates:\n\n - limit: 150\n unit: second\n routeSelectors:\n - matches: # matches the original HTTPRouteRule GET|POST /toys*\n - path:\n type: PathPrefix\n value: \"/toys\"\n method: GET\n when:\n - selector: context.request.http.method # cannot omit this selector or POST /toys/special would also be rate limited\n operator: eq\n value: GET\n - selector: context.request.http.path\n operator: eq\n value: /toys/special\n
How is this RLP would be implemented under the hood if supported? gateway_actions:\n\n- rules:\n - paths: [\"/toys*\"]\n methods: [\"GET\"]\n hosts: [\"*.toystore.acme.com\"]\n - paths: [\"/toys*\"]\n methods: [\"POST\"]\n hosts: [\"*.toystore.acme.com\"]\n configurations:\n - generic_key:\n descriptor_key: \"toystore/toystore-special-toys/specialToys\"\n descriptor_value: \"1\"\n - request_headers:\n descriptor_key: \"context.request.http.method\"\n header_name: \":method\"\n - request_headers:\n descriptor_key: \"context.request.http.path\"\n header_name: \":path\"\n
limits:\n\n- conditions:\n - toystore/toystore-special-toys/specialToys == \"1\"\n - context.request.http.method == \"GET\"\n - context.request.http.path == \"/toys/special\"\n max_value: 150\n seconds: 1\n namespace: kuadrant\n
"},{"location":"architecture/rfcs/0001-rlp-v2/#possible-variations-for-the-selectors-conditions-and-counter-qualifiers","title":"Possible variations for the selectors (conditions and counter qualifiers)","text":"The main drivers behind the proposed design for the selectors (conditions and counter qualifiers), based on (i) structured condition expressions composed of fields selector
, operator
, and value
, and (ii) when
conditions and counters
separated in two distinct fields (variation \"C\" below), are:
- consistency with the Authorino
AuthConfig
API, which also specifies when
conditions expressed in selector
, operator
, and value
fields; - explicit user intent, without subtle distinction of meaning based on presence of optional fields.
Nonetheless here are a few alternative variations to consider:
Structured condition expressions Parsed condition expressions Single field A \nselectors:\n\n - selector: context.request.http.method\n operator: eq\n value: GET\n - selector: auth.identity.username
B \nselectors:\n - context.request.http.method == \"GET\"\n - auth.identity.username
Distinct fields C \u2b50\ufe0f \nwhen:\n - selector: context.request.http.method\n operator: eq\n value: GET\ncounters:\n - auth.identity.username
D \nwhen:\n - context.request.http.method == \"GET\"\ncounters:\n - auth.identity.username
\u2b50\ufe0f Variation adopted for the examples and (so far) final design proposal.
"},{"location":"architecture/rfcs/0001-rlp-v2/#prior-art","title":"Prior art","text":"Most implementations currently orbiting around Gateway API (e.g. Istio, Envoy Gateway, etc) for added RL functionality seem to have been leaning more to the direct route extension pattern instead of Policy Attachment. That might be an option particularly suitable for gateway implementations (gateway providers) and for those aiming to avoid dealing with defaults and overrides.
"},{"location":"architecture/rfcs/0001-rlp-v2/#unresolved-questions","title":"Unresolved questions","text":" - In case a limit definition lists route selectors such that some can be bound to HTTPRouteRules and some cannot (see Example 6), do we bind the valid route selectors and ignore the invalid ones or the limit definition is invalid altogether and bound to no HTTPRouteRule at all? A: By allowing multiple limit definitions to target a same HTTPRouteRule, the issue here stated will become less often. For the other cases where a limit definition still fails to select an HTTPRouteRule (e.g. due to mismatching trigger matches), the limit definition is not considered invalid. Possibly the limit definitions is considered \"stale\" (or \"orphan\"), i.e., not bound to any HTTPRouteRule.
- What should we fill domain/namespace with, if no longer with the hostname? This can be useful for multi-tenancy. A: For now, the domain/namespace field of the RL configuration (Envoy and Limitador ends) will be filled with a fixed (configurable) string (e.g. \"kuadrant\"). This can change in future to better support multi-tenancy and/or other use cases where a total sharding of the limit definitions within a same instance of Kuadrant is desired.
- How do we support lists of hostnames in Limitador conditions (single counter)? Should we open an issue for a new
in
operator? A: Not needed. The hostnames must exist in the targeted object explicitly, just like any other routing rules intended to be targeted by a limit definition. By setting the explicit hostname in the targeted network object (Gateway or HTTPRoute), the also becomes a route rules available for \"hard\" trigger configuration. - What \"soft\" condition
operator
s do we need to support (e.g. eq
, neq
, exists
, nexists
, matches
)? - Do we need special field to define shared counters across clusters/Limitador instances or that's to be solved at another layer (
Limitador
, Kuadrant
CRDs, MCTC)?
"},{"location":"architecture/rfcs/0001-rlp-v2/#future-possibilities","title":"Future possibilities","text":" - Port
routeSelectors
and the semantics around it to the AuthPolicy
API (aka \"KAP v2\"). - Defaults and overrides, either along the lines of architecture#4 or architecture#10.
"},{"location":"architecture/rfcs/0002-well-known-attributes/","title":"Well-known Attributes","text":" - Feature Name:
well-known-attributes
- Start Date: 2023-06-13
- RFC PR: Kuadrant/architecture#17
- Issue tracking: Kuadrant/architecture#53
"},{"location":"architecture/rfcs/0002-well-known-attributes/#summary","title":"Summary","text":"Define a well-known structure for users to declare request data selectors in their RateLimitPolicies and AuthPolicies. This structure is referred to as the Kuadrant Well-known Attributes.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#motivation","title":"Motivation","text":"The well-known attributes let users write policy rules \u2013 conditions and, in general, dynamic values that refer to attributes in the data plane - in a concise and seamless way.
Decoupled from the policy CRDs, the well-known attributes:
- define a common language for referring to values of the data plane in the Kuadrant policies;
- allow dynamically evolving the policy APIs regarding how they admit references to data plane attributes;
- encompass all common and component-specific selectors for data plane attributes;
- have a single and unified specification, although this specification may occasionally link to additional, component-specific, external docs.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#guide-level-explanation","title":"Guide-level explanation","text":"One who writes a Kuadrant policy and wants to build policy constructs such as conditions, qualifiers, variables, etc, based on dynamic values of the data plane, must refer the attributes that carry those values, using the declarative language of Kuadrant's Well-known Attributes.
A dynamic data plane value is typically a value of an attribute of the request or an Envoy Dynamic Metadata entry. It can be a value of the outer request being handled by the API gateway or proxy that is managed by Kuadrant (\"context request\") or an attribute of the direct request to the Kuadrant component that delivers the functionality in the data plane (rate-limiting or external auth).
A Well-known Selector is a construct of a policy API whose value contains a direct reference to a well-known attribute. The language of the well-known attributes and therefore what one would declare within a well-known selector resembles a JSON path for navigating a possibly complex JSON object.
Example 1. Well-known selector used in a condition
apiGroup: examples.kuadrant.io\nkind: PaintPolicy\nspec:\n rules:\n\n - when:\n - selector: auth.identity.group\n operator: eq\n value: admin\n color: red\n
In the example, `auth.identity.group` is a well-known selector of an attribute `group`, known to be injected by the external authorization service (`auth`) to describe the group the user (`identity`) belongs to. In the data plane, whenever this value is equal to `admin`, the abstract `PaintPolicy` policy states that the traffic must be painted `red`.
Example 2. Well-known selector used in a variable
apiGroup: examples.kuadrant.io\nkind: PaintPolicy\nspec:\n rules:\n\n - color: red\n alpha:\n dynamic: request.headers.x-color-alpha\n
In the example, `request.headers.x-color-alpha` is a selector of a well-known attribute `request.headers` that gives access to the headers of the context HTTP request. The selector retrieves the value of the `x-color-alpha` request header to dynamically fill the `alpha` property of the abstract `PaintPolicy` policy at each request.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#reference-level-explanation","title":"Reference-level explanation","text":"The Well-known Attributes are a compilation inspired by some of the Envoy attributes and Authorino's Authorization JSON and its related JSON paths.
From the Envoy attributes, only attributes that are available before establishing connection with the upstream server qualify as a Kuadrant well-known attribute. This excludes attributes such as the response attributes and the upstream attributes.
As for the attributes inherited from Authorino, these are either based on Envoy's `AttributeContext` type of the external auth request API or on internal types defined by Authorino to fulfill the Auth Pipeline.
These two subsets of attributes are unified into a single set of well-known attributes. For each attribute that exists in both subsets, the name of the attribute as specified in the Envoy attributes subset prevails. An example of such is `request.id` (to refer to the ID of the request) superseding `context.request.http.id` (as the same attribute is referred to in an Authorino `AuthConfig`).
The next sections specify the well-known attributes organized in the following groups:
- Request attributes
- Connection attributes
- Metadata and filter state attributes
- Auth attributes
- Rate-limit attributes
"},{"location":"architecture/rfcs/0002-well-known-attributes/#request-attributes","title":"Request attributes","text":"The following attributes are related to the context HTTP request that is handled by the API gateway or proxy managed by Kuadrant.
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `request.id` | String | Request ID corresponding to `x-request-id` header value | ✓ | ✓ |
| `request.time` | Timestamp | Time of the first byte received | ✓ | ✓ |
| `request.protocol` | String | Request protocol ("HTTP/1.0", "HTTP/1.1", "HTTP/2", or "HTTP/3") | ✓ | ✓ |
| `request.scheme` | String | The scheme portion of the URL e.g. "http" | ✓ | ✓ |
| `request.host` | String | The host portion of the URL | ✓ | ✓ |
| `request.method` | String | Request method e.g. "GET" | ✓ | ✓ |
| `request.path` | String | The path portion of the URL | ✓ | ✓ |
| `request.url_path` | String | The path portion of the URL without the query string | | ✓ |
| `request.query` | String | The query portion of the URL in the format of "name1=value1&name2=value2" | ✓ | ✓ |
| `request.headers` | Map<String, String> | All request headers indexed by the lower-cased header name | ✓ | ✓ |
| `request.referer` | String | Referer request header | | ✓ |
| `request.useragent` | String | User agent request header | | ✓ |
| `request.size` | Number | The HTTP request size in bytes. If unknown, it must be -1 | ✓ | |
| `request.body` | String | The HTTP request body. (Disabled by default. Requires additional proxy configuration to enable it.) | ✓ | |
| `request.raw_body` | Array<Number> | The HTTP request body in bytes. This is sometimes used instead of `body` depending on the proxy configuration. | ✓ | |
| `request.context_extensions` | Map<String, String> | This is analogous to `request.headers`, however these contents are not sent to the upstream server. It provides an extension mechanism for sending additional information to the auth service without modifying the proto definition. It maps to the internal opaque context in the proxy filter chain. (Requires additional configuration in the proxy.) | ✓ | |
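For instance, a couple of the request attributes above combined in a limit definition — a hypothetical sketch using the `when`/`counters` variant proposed in RFC 0001 (field names follow that proposal, not a released API):

limits:
  per-user-gets:
    rates:
    - limit: 5
      duration: 10
      unit: second
    when:
    - selector: request.method  # well-known attribute, in place of context.request.http.method
      operator: eq
      value: GET
    counters:
    - auth.identity.username  # one counter per authenticated user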
"},{"location":"architecture/rfcs/0002-well-known-attributes/#connection-attributes","title":"Connection attributes","text":"The following attributes are available once the downstream connection with the API gateway or proxy managed by Kuadrant is established. They apply to HTTP requests (L7) as well, but also to proxied connections limited at L3/L4.
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `source.address` | String | Downstream connection remote address | ✓ | ✓ |
| `source.port` | Number | Downstream connection remote port | ✓ | ✓ |
| `source.service` | String | The canonical service name of the peer | ✓ | |
| `source.labels` | Map<String, String> | The labels associated with the peer. These could be pod labels for Kubernetes or tags for VMs. The source of the labels could be an X.509 certificate or other configuration. | ✓ | |
| `source.principal` | String | The authenticated identity of this peer. If an X.509 certificate is used to assert the identity in the proxy, this field is sourced from "URI Subject Alternative Names", "DNS Subject Alternate Names" or "Subject" in that order. The format is issuer specific, e.g. SPIFFE format is spiffe://trust-domain/path, Google account format is https://accounts.google.com/{userid}. | ✓ | |
| `source.certificate` | String | The X.509 certificate used to authenticate the identity of this peer. When present, the certificate contents are encoded in URL and PEM format. | ✓ | |
| `destination.address` | String | Downstream connection local address | ✓ | ✓ |
| `destination.port` | Number | Downstream connection local port | ✓ | ✓ |
| `destination.service` | String | The canonical service name of the peer | ✓ | |
| `destination.labels` | Map<String, String> | The labels associated with the peer. These could be pod labels for Kubernetes or tags for VMs. The source of the labels could be an X.509 certificate or other configuration. | ✓ | |
| `destination.principal` | String | The authenticated identity of this peer. If an X.509 certificate is used to assert the identity in the proxy, this field is sourced from "URI Subject Alternative Names", "DNS Subject Alternate Names" or "Subject" in that order. The format is issuer specific, e.g. SPIFFE format is spiffe://trust-domain/path, Google account format is https://accounts.google.com/{userid}. | ✓ | |
| `destination.certificate` | String | The X.509 certificate used to authenticate the identity of this peer. When present, the certificate contents are encoded in URL and PEM format. | ✓ | |
| `connection.id` | Number | Downstream connection ID | | ✓ |
| `connection.mtls` | Boolean | Indicates whether TLS is applied to the downstream connection and the peer certificate is presented | | ✓ |
| `connection.requested_server_name` | String | Requested server name in the downstream TLS connection | | ✓ |
| `connection.tls_session.sni` | String | SNI used for TLS session | ✓ | |
| `connection.tls_version` | String | TLS version of the downstream TLS connection | | ✓ |
| `connection.subject_local_certificate` | String | The subject field of the local certificate in the downstream TLS connection | | ✓ |
| `connection.subject_peer_certificate` | String | The subject field of the peer certificate in the downstream TLS connection | | ✓ |
| `connection.dns_san_local_certificate` | String | The first DNS entry in the SAN field of the local certificate in the downstream TLS connection | | ✓ |
| `connection.dns_san_peer_certificate` | String | The first DNS entry in the SAN field of the peer certificate in the downstream TLS connection | | ✓ |
| `connection.uri_san_local_certificate` | String | The first URI entry in the SAN field of the local certificate in the downstream TLS connection | | ✓ |
| `connection.uri_san_peer_certificate` | String | The first URI entry in the SAN field of the peer certificate in the downstream TLS connection | | ✓ |
| `connection.sha256_peer_certificate_digest` | String | SHA256 digest of the peer certificate in the downstream TLS connection if present | | ✓ |
"},{"location":"architecture/rfcs/0002-well-known-attributes/#metadata-and-filter-state-attributes","title":"Metadata and filter state attributes","text":"The following attributes are related to the Envoy proxy filter chain. They include metadata exported by the proxy throughout the filters and information about the states of the filters themselves.
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `metadata` | Metadata | Dynamic request metadata | ✓ | ✓ |
| `filter_state` | Map<String, String> | Mapping from a filter state name to its serialized string value | | ✓ |
"},{"location":"architecture/rfcs/0002-well-known-attributes/#auth-attributes","title":"Auth attributes","text":"The following attributes are exclusive of the external auth service (Authorino).
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `auth.identity` | Any | Single resolved identity object, post-identity verification | ✓ | |
| `auth.metadata` | Map<String, Any> | External metadata fetched | ✓ | |
| `auth.authorization` | Map<String, Any> | Authorization results resolved by each authorization rule, access granted only | ✓ | |
| `auth.response` | Map<String, Any> | Response objects exported by the auth service post-access granted | ✓ | |
| `auth.callbacks` | Map<String, Any> | Response objects returned by the callback requests issued by the auth service | ✓ | |
The auth service also supports modifying selected values by chaining modifiers in the path.
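For example, modifiers can be chained directly in a selector. The selectors below are hypothetical; `@case` and `@extract` are modifiers documented for Authorino's JSON paths:

- selector: 'auth.identity.email.@case:lower'  # lower-case the resolved e-mail before use
- selector: 'request.path.@extract:{"sep":"/","pos":2}'  # e.g. extracts "users" from "/v1/users/123"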
"},{"location":"architecture/rfcs/0002-well-known-attributes/#rate-limit-attributes","title":"Rate-limit attributes","text":"The following attributes are exclusive of the rate-limiting service (Limitador).
Attribute
Type
Description
Auth
RL
ratelimit.domain
String
The rate limit domain. This enables the configuration to be namespaced per application (multi-tenancy).
\u2713
ratelimit.hits_addend
Number
Specifies the number of hits a request adds to the matched limit. Fixed value: `1`. Reserved for future usage.
\u2713
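To ground these two attributes, here is a hypothetical Limitador limit definition in its file-based format: the `namespace` field carries what `ratelimit.domain` refers to, and each matching request increments the counter by the fixed `hits_addend` of 1. The flattened condition/variable keys are an assumption about how selectors would be serialized:

- namespace: kuadrant
  max_value: 10
  seconds: 60
  conditions:
  - "request.method == 'GET'"
  variables:
  - auth.identity.username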
"},{"location":"architecture/rfcs/0002-well-known-attributes/#drawbacks","title":"Drawbacks","text":"The decoupling of the well-known attributes and the language of well-known attributes and selectors from the individual policy CRDs is what makes it somewhat flexible and common across the components (rate-limiting and auth). However, it's less structured and it introduces another syntax for users to get familiar with.
This additional language competes with the language of the route selectors (RFC 0001), based on Gateway API's `HTTPRouteMatch` type.
Being \"soft-coded\" in the policy specs (as opposed to a hard-coded sub-structure inside of each policy type) does not mean it's completely decoupled from implementation in the control plane and/or intermediary data plane components. Although many attributes can be supported almost as a pass-through, from being used in a selector in a policy, to a corresponding value requested by the wasm-shim to its host, that is not always the case. Some translation may be required for components not integrated via wasm-shim (e.g. Authorino), as well as for components integrated via wasm-shim (e.g. Limitador) in special cases of composite or abstraction well-known attributes (i.e. attributes not available as-is via ABI, e.g. auth.identity
in a RLP). Either way, some validation of the values introduced by users in the selectors may be needed at some point in the control plane, thus requiring arguably a level of awaresness and coupling between the well-known selectors specification and the control plane (policy controllers) or intermediary data plane (wasm-shim) components.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#rationale-and-alternatives","title":"Rationale and alternatives","text":"As an alternative to JSON path-like selectors based on a well-known structure that induces the proposed language of well-known attributes, these same attributes could be defined as sub-types of each policy CRD. The Golang packages defining the common attributes across CRDs could be shared by the policy type definitions to reduce repetition. However, that approach would possibly involve a staggering number of new type definitions to cover all the cases for all the groups of attributes to be supported. These are constructs that not only need to be understood by the policy controllers, but also known by the user who writes a policy.
Additionally, all attributes, including new attributes occasionally introduced by Envoy and made available to the wasm-shim via ABI, would always require translation from the user-level abstraction, as represented in a policy, to the actual form used in the wasm-shim configuration and Authorino AuthConfigs.
Not implementing this proposal and keeping the current state of things means little consistency in how these common constructs for rules and conditions are represented across the different types of policy. This lack of consistency has a direct impact on the overhead faced by users to learn how to interact with Kuadrant and write different kinds of policies, as well as on the maintainers for tasks of coding for policy validation and reconciliation of data plane configurations.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#prior-art","title":"Prior art","text":"Authorino's dynamic JSON paths, related to Authorino's Authorization JSON and used in when
conditions and inside of multiple other constructs of the AuthConfig, are an example of feature of very similar approach to the one proposed here.
Arguably, Authorino's perceived flexibility would not have been possible with the Authorization JSON selectors. Users can write quite sophisticated policy rules (conditions, variable references, etc) by leveraging the those dynamic selectors. Because they are backed by JSON-based machinery in the code, Authorino's selectors have very little to, in some cases, none at all variation compared Open Policy Agent's Rego policy language, which is often used side by side in the same AuthConfigs.
Authorino's Authorization JSON selectors are, on the one hand, more restricted to the structure of the `CheckRequest` payload (`context.*` attributes). At the same time, they are very open in the part associated with the internal attributes built along the Auth Pipeline (i.e. `auth.*` attributes). That makes Authorino's Authorization JSON selectors more limited compared to the Envoy attributes made available to the wasm-shim via ABI, but also harder to validate. In some cases, such as deep references into objects fetched from external sources of metadata, resolved OPA objects, JWT claims, etc., it is impossible to validate for correct references.
Another experience learned from Authorino's Authorization JSON selectors is that they depend substantially on the so-called \"modifiers\". Many use cases involving parsing and breaking down attributes that are originally available in a more complex form would not be possible without the modifiers. Examples of such cases are: extracting portions of the path and/or query string parameters (e.g. collection and resource identifiers), translating HTTP verbs into corresponding operations, base64-decoding values from the context HTTP request, amongst several others.
"},{"location":"architecture/rfcs/0002-well-known-attributes/#unresolved-questions","title":"Unresolved questions","text":" -
How to deal with the differences regarding the availability and data types of the attributes across clients/hosts?
-
Can we make more attributes that are currently available to only one of the components common to both?
-
Will we need some kind of global support for modifiers (functions) in the well-known selectors or those can continue to be an Authorino-only feature?
-
Does Authorino, which is more strict regarding the data structure that induces the selectors, need to implement this specification or could/should it keep its current selectors and a translation be performed by the AuthPolicy controller?
"},{"location":"architecture/rfcs/0002-well-known-attributes/#future-possibilities","title":"Future possibilities","text":" - Extend with more well-known attributes that abstract common patterns and/or for rather opinioned use cases. Examples:
auth.*
attributes supported in the rate limit service request.authenticated
request.operation.(read|write)
request.param.my-param
-
connection.secure
-
Other Envoy attributes
Wasm attributes
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `wasm.plugin_name` | String | Plugin name | | ✓ |
| `wasm.plugin_root_id` | String | Plugin root ID | | ✓ |
| `wasm.plugin_vm_id` | String | Plugin VM ID | | ✓ |
| `wasm.node` | Node | Local node description | | ✓ |
| `wasm.cluster_name` | String | Upstream cluster name | | ✓ |
| `wasm.cluster_metadata` | Metadata | Upstream cluster metadata | | ✓ |
| `wasm.listener_direction` | Number | Enumeration value of the listener traffic direction | | ✓ |
| `wasm.listener_metadata` | Metadata | Listener metadata | | ✓ |
| `wasm.route_name` | String | Route name | | ✓ |
| `wasm.route_metadata` | Metadata | Route metadata | | ✓ |
| `wasm.upstream_host_metadata` | Metadata | Upstream host metadata | | ✓ |
Proxy configuration attributes
| Attribute | Type | Description | Auth | RL |
|---|---|---|---|---|
| `xds.cluster_name` | String | Upstream cluster name | | ✓ |
| `xds.cluster_metadata` | Metadata | Upstream cluster metadata | | ✓ |
| `xds.route_name` | String | Route name | | ✓ |
| `xds.route_metadata` | Metadata | Route metadata | | ✓ |
| `xds.upstream_host_metadata` | Metadata | Upstream host metadata | | ✓ |
| `xds.filter_chain_name` | String | Listener filter chain name | | ✓ |
- Add some support for value modifiers (functions), along the lines of Authorino's JSON path modifiers and/or Envoy attributes' path expressions.
"},{"location":"architecture/rfcs/0003-dns-policy/","title":"RFC Template","text":" - Feature Name: DNSPolicy
- Start Date: 2023-07-01
- RFC PR: Kuadrant/architecture#20
- Issue tracking: Kuadrant/multicluster-gateway-controller#219
- Labels: DNS, Load Balancing, Multi-Cluster
"},{"location":"architecture/rfcs/0003-dns-policy/#summary","title":"Summary","text":"Provide a policy for configuring how DNS should be handed for a given gateway. Provide a mechanism for enabling DNS based load balancing.
"},{"location":"architecture/rfcs/0003-dns-policy/#motivation","title":"Motivation","text":"Gateway admins, need a way to define the DNS policy for a multi-cluster gateway in order to control how much and which traffic reaches these gateways. Ideally we would allow them to express a strategy that they want to use without needing to get into the details of each provider and needing to create and maintain dns record structure and individual records for all the different gateways that may be within their infrastructure.
"},{"location":"architecture/rfcs/0003-dns-policy/#guide-level-explanation","title":"Guide-level explanation","text":"Allow definition of a DNSPolicy that configures load balancing to decide how traffic should be distributed across multiple gateway instances from the central control plane.
"},{"location":"architecture/rfcs/0003-dns-policy/#terms","title":"Terms","text":" - managed listener: This is a listener with a host backed by a DNS zone managed by the multi-cluster gateway controller
- hub cluster: control plane cluster that managed 1 or more spokes
- spoke cluster: a cluster managed by the hub control plane cluster. This is where gateway are instantiated
Provide a control plane DNSPolicy API that uses the idea of direct policy attachment from gateway API that allows a load balancing strategy to be applied to the DNS records structure for any managed listeners being served by the data plane instances of this gateway. The DNSPolicy also covers health checks that inform the DNS response but that is not covered in this document.
Below is a draft API for what we anticipate the DNSPolicy to look like:
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n health:\n ...\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom: #optional\n\n - value: AWS #optional with both GEO and weighted. With GEO the custom weight is applied to gateways within a Geographic region\n weight: 10\n - value: GCP\n weight: 20\n GEO: #optional\n defaultGeo: IE # required with GEO. Chooses a default DNS response when no particular response is defined for a request from an unknown GEO.\n
"},{"location":"architecture/rfcs/0003-dns-policy/#available-load-balancing-strategies","title":"Available Load Balancing Strategies","text":"GEO and Weighted load balancing are well understood strategies and this API effectively allow a complex requirement to be expressed relatively simply and executed by the gateway controller in the chosen DNS provider. Our default policy will execute a \"Round Robin\" weighted strategy which reflects the current default behaviour.
With the above API we can provide weighted, GEO, and weighted-within-a-GEO load balancing. A weighted strategy with a minimum of a default weight is always required and is the simplest type of policy. The multi-cluster gateway controller will set up a default policy when a gateway is discovered (shown below). This policy can be replaced or modified by the user. A weighted strategy can be complemented with a GEO strategy, i.e. they can be used together in order to provide GEO and weighted (within a GEO) load balancing. By defining a GEO section, you are indicating that you want to use a GEO based strategy (how this works is covered below).
apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: default-policy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted: # required\n defaultWeight: 10 #required, all records created get this weight\n health:\n ... \n
In order to provide GEO based DNS and allow customisation of the weighting, we need some additional information to be provided by the gateway / cluster admin about where this gateway has been placed. For example, if they want to use GEO based DNS as a strategy, we need to know what GEO identifier(s) to use for each record we create and a default GEO to use as a catch-all. Also, if the desired load balancing approach is to provide custom weighting and no longer simply use Round Robin, we will need a way to identify which records to apply that custom weighting to, based on the clusters the gateway is placed on.
To solve this we will allow two new attributes to be added to the `ManagedCluster` resource as labels:
kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
These two labels allow setting values in the DNSPolicy that will be reflected into DNS records for gateways placed on that cluster, depending on the strategies used (see the first DNSPolicy definition above for how these values are used, or take a look at the examples at the bottom).
Example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-geo-code: \"IE\"\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\nspec: \n
The attributes provide the key and value we need in order to understand how to define records for a given LB address based on the DNSPolicy targeting the gateway.
The `kuadrant.io/lb-attribute-geo-code` attribute value is provider specific; using an invalid code will result in an error status condition in the DNSRecord resource.
"},{"location":"architecture/rfcs/0003-dns-policy/#dns-record-structure","title":"DNS Record Structure","text":"This is an advanced topic and so is broken out into its own proposal doc DNS Record Structure
"},{"location":"architecture/rfcs/0003-dns-policy/#custom-weighting","title":"Custom Weighting","text":"Custom weighting will use the associated custom-weight
attribute set on the ManagedCluster
to decide which records should get a specific weight. The value of this attribute is up to the end user.
example:
apiVersion: cluster.open-cluster-management.io/v1\nkind: ManagedCluster\nmetadata:\n labels:\n kuadrant.io/lb-attribute-custom-weight: \"GCP\"\n
The above is then used in the DNSPolicy to set custom weights for the records associated with the target gateway.
- value: GCP\n weight: 20\n
So any gateway targeted by a DNSPolicy with the above definition that is placed on a `ManagedCluster` with the `kuadrant.io/lb-attribute-custom-weight` label set to a value of GCP will get an A record with a weight of 20.
"},{"location":"architecture/rfcs/0003-dns-policy/#status","title":"Status","text":"DNSPolicy should have a ready condition that reflect that the DNSRecords have been created and configured as expected. In the case that there is an invalid policy, the status message should reflect this and indicate to the user that the old DNS has been preserved.
We will also want to add a status condition to the gateway status indicating it is affected by this policy. Gateway API recommends the following status condition:
- type: gateway.networking.k8s.io/PolicyAffected\n status: True \n message: \"DNSPolicy has been applied\"\n reason: PolicyApplied\n ...\n
https://github.com/kubernetes-sigs/gateway-api/pull/2128/files#diff-afe84021d0647e83f420f99f5d18b392abe5ec82d68f03156c7534de9f19a30aR888
"},{"location":"architecture/rfcs/0003-dns-policy/#example-policies","title":"Example Policies","text":""},{"location":"architecture/rfcs/0003-dns-policy/#round-robin-the-default-policy","title":"Round Robin (the default policy)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: RoundRobinPolicy\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n
"},{"location":"architecture/rfcs/0003-dns-policy/#geo-round-robin","title":"GEO (Round Robin)","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNS\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n GEO:\n defaultGeo: IE\n
"},{"location":"architecture/rfcs/0003-dns-policy/#custom","title":"Custom","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: SendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure #any record associated with a gateway on a cluster without this value gets the default\n weight: 30\n
"},{"location":"architecture/rfcs/0003-dns-policy/#geo-with-custom-weights","title":"GEO with Custom Weights","text":"apiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nname: GEODNSAndSendMoreToAzure\nspec:\n targetRef: # defaults to gateway gvk and current namespace\n name: gateway-name\n loadBalancing:\n weighted:\n defaultWeight: 10\n custom:\n\n - attribute: cloud\n value: Azure\n weight: 30\n GEO:\n defaultGeo: IE\n
"},{"location":"architecture/rfcs/0003-dns-policy/#reference-level-explanation","title":"Reference-level explanation","text":" - Add a DNSPolicy CRD that conforms to policy attachment spec
- Add a new DNSPolicy controller to MCG
- DNS logic and record management should all migrate out of the gateway controller into this new DNSPolicy controller as it is the responsibility and domain of the DNSPolicy controller to manage DNS
- remove the Hosts interface, as we do not want other controllers using this to bring DNS logic into other areas of the code.
"},{"location":"architecture/rfcs/0003-dns-policy/#drawbacks","title":"Drawbacks","text":"You cannot have a different load balancing strategy for each listener within a gateway. So in the following gateway definition
spec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n hostname: myapp.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS\n - allowedRoutes:\n namespaces:\n from: All\n hostname: other.hcpapps.net\n name: api\n port: 443\n protocol: HTTPS \n
The DNS policy targeting this gateway will apply to both myapp.hcpapps.net and other.hcpapps.net.
However, there is still significant value even with this limitation. This limitation is something we will likely revisit in the future.
"},{"location":"architecture/rfcs/0003-dns-policy/#background-docs","title":"Background Docs","text":"DNS Provider Support
AWS DNS
Google DNS
Azure DNS
Direct Policy Attachment
"},{"location":"architecture/rfcs/0003-dns-policy/#rationale-and-alternatives","title":"Rationale and alternatives","text":"An alternative is to configure all of this yourself manually in a dns provider. This is can be a highly complex dns configuration that it would be easy to get wrong.
"},{"location":"architecture/rfcs/0004-policy-status/","title":"Policy Status","text":" - Feature Name:
policy_status_states
- Start Date: 2023-02-03
- RFC PR: Kuadrant/architecture#0009
- Issue tracking: Kuadrant/architecture#0038
"},{"location":"architecture/rfcs/0004-policy-status/#summary","title":"Summary","text":"This RFC proposes a new design for any Kuadrant Policy (RateLimitPolicy
, AuthPolicy
, etc..) status definition and transitions.
"},{"location":"architecture/rfcs/0004-policy-status/#motivation","title":"Motivation","text":"At the time being, the RateLimitPolicy
and AuthPolicy
status doesn't clearly and truthfully communicate the actual state of reconciliation and healthiness with its operator managed services, i.e., the Rate Limit service (\"Limitador\") and the Auth service (\"Authorino\"), referred to as \"Kuadrant services\".
As a consequence, misleading information is shared causing unexpected errors and flawed assumptions.
The following are some issues reported in relation to the aforementioned problems:
- https://github.com/Kuadrant/kuadrant-operator/issues/87
- https://github.com/Kuadrant/kuadrant-operator/issues/96
- https://github.com/Kuadrant/kuadrant-operator/issues/140
"},{"location":"architecture/rfcs/0004-policy-status/#guide-level-explanation","title":"Guide-level explanation","text":"This design for setting the status of the Kuadrant policy CRs is divided in 2 stages, where each stage could be applied/developed in order and would reflect valuable and accurate information with different degrees of acuity.
The Policy CRD Status in the following diagrams is simplified as states, which in the Reference-level explanation will be translated to the actual Status Conditions.
"},{"location":"architecture/rfcs/0004-policy-status/#stage-1","title":"Stage 1","text":"State of the policy CR defined by: application, validation, and reconciliation of it
The main signalization at Stage 1 is about whether a policy CR has been Accepted
or not.
States rationale:
Accepted
: This state is reached after the Validation
and Reconciliation
event has being successfully passed. Invalid
: When the Validation
process encounters an error, this state will be set. TargetNotFound
: This state will be set when the Reconciliation
process encounters an error. Conflicted
: This state will be set when the Reconciliation
process encounters an error.
Notes:
- States from the Stage 2 could be implemented as well, but only relying on Validation and Reconciliation events.
"},{"location":"architecture/rfcs/0004-policy-status/#stage-2","title":"Stage 2","text":"Final state of the policy CR defined by: health check with the Kuadrant services (post-reconciliation)
The Enforced
type is introduced to capture the difference between a policy been reconciled and it's been enforced at the service.
States rationale:
Enforced
: After a successful response of the Service Probe
, this states communicates the policy is finally enforced. PartiallyEnforced
: This state will be set when the Reconciliation
event encounters an overlap with other policies. Overridden
: This state will be set when the Reconciliation
event invalidates the policy because another one takes precedence.
"},{"location":"architecture/rfcs/0004-policy-status/#reference-level-explanation","title":"Reference-level explanation","text":"In general, the new states and conditions align with GEP-713.
Besides the proposed `Accepted` PolicyType, the `Enforced` PolicyType would be added to reflect the final state of the policy, which means that the policy is showing the synced actual state of the Kuadrant services.
The missing `Failed` PolicyType would be implicitly represented by the `TargetNotFound` and `Invalid` PolicyTypeReasons.
"},{"location":"architecture/rfcs/0004-policy-status/#conditions","title":"Conditions","text":"All conditions are top-level.
| Type | Status | Reason | Message |
|---|---|---|---|
| Accepted | True | \"Accepted\" | \"KuadrantPolicy has been accepted\" |
| | False | \"Conflicted\" | \"KuadrantPolicy is conflicted by [policy-ns/policy-name], ...\" |
| | False | \"Invalid\" | \"KuadrantPolicy is invalid\" |
| | False | \"TargetNotFound\" | \"KuadrantPolicy target [resource-name] was not found\" |
| Enforced | True | \"Enforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"Unknown\" | \"KuadrantPolicy has encountered some issues\" |
| | False | \"Overridden\" | \"KuadrantPolicy is overridden by [policy-ns/policy-name], ...\" |
Messages corresponding to falsey statuses are required and should reflect the error that was encountered.
It's possible to have the Failed state as a top level condition too. In this case, it might be useful to consider a third \"Unknown\" status.
"},{"location":"architecture/rfcs/0004-policy-status/#policy-ancestor-status","title":"Policy ancestor status","text":"The Status stanza of the policy CRs must implement Gateway API's PolicyAncestorStatus struct. This will provide broader consistency and improved discoverability of effective policies.
"},{"location":"architecture/rfcs/0004-policy-status/#implementation-detailsrequisites","title":"Implementation details/requisites","text":"Full implementation of Stage 2 states assumes reporting mechanisms in place, provided by the Kuadrant services, that allow tracing the state of the configurations applied on the services, back to the original policies, to infer the final state of the policy CRs (i.e. whether truly Enforced
or not.)
Without such, Stage 2 could be only partially achieved, by relying only on Reconciliation events.
"},{"location":"architecture/rfcs/0004-policy-status/#drawbacks","title":"Drawbacks","text":" - This proposal will require to change the code controllers assert the status
- Since the Status is part of the \"API\", won't be backwards compatible
- Documentation updating
- The implementation of the affected policies will create a fan-out problem, that might lead to updating many policy objects and apiserver load.
"},{"location":"architecture/rfcs/0004-policy-status/#rationale-and-alternatives","title":"Rationale and alternatives","text":"Another option was considered (previously referred to as \"Option 1\"). While valid, this alternative would not align with GEP-713, neither it would be as flexible as the final design proposed.
Details of the discarded alternative This alternative design would come in 3 stages: **Stage 1: State of the policy CR defined by: application and validation of it** This first stage is a simple version where the operator only relies on itself, not checking the healthiness with the Kuadrant services, but just validating the Spec. ![](0004-policy-status-assets/policy_status_1.png) States rationale: * `Created`: The initial state. It announces that the policy has successfully been created and the operator acknowledges it. * `Applied`: This state is reached after the `Validation` event has been successfully passed. * `Failed`: This one would be set when the `Validation` process encounters an error. This could be either a condition's failed/error state or a top-level condition. * `Updated`: From `Failed` or `Applied`, a `Spec Change` event could be triggered that would move it to this state. **Stage 2: Further reconciliation check provides a new state** This following one, besides checking what the former stage does, also adds the states reflecting the reconciliation process of any needed Kubernetes object, Kuadrant Services custom resources and any other 3rd party CR required. An example, in the case of the RLP, would be creating/updating the `ConfigMap` holding the `Limitador` config file. ![](0004-policy-status-assets/policy_status_2.png) States rationale: * `Applied`: The __Applied__ state would not be final, and would precede a `Reconciliation` event. * `Reconciled`: It communicates that the policy has successfully been reconciled, and any K8s object or required CR has been updated. * `Failed`: This one would be reached when either of the `Validation` and `Reconciliation` processes has encountered any errors. **Stage 3: Final state of the policy CR defined by: health check with the Kuadrant services (post-reconciliation)** The final stage would bring a greater degree of accuracy, thanks to a final process that would check the healthiness and the configuration version the Kuadrant services currently enforce. ![](0004-policy-status-assets/policy_status_3.png) States rationale: * `Reconciled`: This state would precede the \"Health check\" process graphed as a `Service Probe` event. * `Enforced`: After a successful response of the `Service Probe`, this state communicates that the policy is finally enforced. This is the final top-level condition. * `Failed`: Now this state could also be set after encountering errors in the `Service Probe` check. The stages mentioned above would follow the [Kubernetes guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) regarding the Status object definition. **Conditions** All conditions are top-level.
| Type | Status | Reason | Message |
|---|---|---|---|
| Progressing | True | \"PolicyCreated\" | \"KuadrantPolicy created\" |
| | True | \"PolicyUpdated\" | \"KuadrantPolicy has been updated\" |
| | True | \"PolicyApplied\" | \"KuadrantPolicy has been successfully applied\" |
| | True | \"PolicyReconciled\" | \"KuadrantPolicy has been successfully reconciled\" |
| | False | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"PolicyError\" | \"KuadrantPolicy has encountered an error\" |
| Enforced | True | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |
| | False | \"PolicyPartiallyEnforced\" | \"KuadrantPolicy has encountered some issues and has been partially applied\" |
| | False | \"PolicyOverridden\" | \"KuadrantPolicy is overridden by [policy-ns/policy-name]\" |
| Failed | True | \"PolicyValidationError\" | \"KuadrantPolicy has failed to validate\" |
| | True | \"PolicyServiceError\" | \"KuadrantPolicy has failed to enforce\" |
| | False | \"PolicyEnforced\" | \"KuadrantPolicy has been successfully enforced\" |
- Current KuadrantPolicy Status work
"},{"location":"architecture/rfcs/0004-policy-status/#unresolved-questions","title":"Unresolved questions","text":" - Is it worthy to implement a state machine or state machine design pattern to achieve this set of conditions?
"},{"location":"architecture/rfcs/0004-policy-status/#future-possibilities","title":"Future possibilities","text":"The implementation of this proposal could be part of kuadrant/gateway-api-machinery.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/","title":"Single Cluster DNSPolicy","text":" - Feature Name:
single-cluster-dnspolicy
- Start Date: 2023-10-09
- RFC PR: Kuadrant/architecture#30
- Issue tracking:
- Kuadrant/architecture#31
- Kuadrant/architecture#67
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#summary","title":"Summary","text":"Proposal for changes to the DNSPolicy
API to allow it to provide a simple routing strategy as an option in a single cluster context. This will remove, but not negate, the complex DNS structure we use in a multi-cluster environment and in doing so allow use of popular dns integrators such as external-dns .
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#motivation","title":"Motivation","text":"The DNSPolicy
API (v1alpha1), was implemented as part of our multi cluster gateway offering using OCM and as such the design and implementation were influenced heavily by how we want multi cluster dns to work.
- Decouple the API entirely from OCM and multi cluster specific concepts.
- Simplify the DNS record structure created for a gateway listeners host for single cluster use.
- Improve the likelihood of adoption by creating an integration path for other kubernetes dns controllers such as external-dns.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#guide-level-explanation","title":"Guide-level explanation","text":"The DNSPolicy can be used to target a Gateway in a single cluster context and will create dns records for each listener host in an appropriately configured external dns provider. In this context the advanced loadbalancing
configuration is unnecessary, and the resulting DNSRecord can be created mapping individual listener hosts to a single DNS A or CNAME record by using the simple
routing strategy in the DNSPolicy.
Example 1. DNSPolicy using simple
routing strategy
apiVersion: kuadrant.io/v1alpha2\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\n
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n gatewayClassName: istio\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"myapp.mn.hcpapps.net\"\n port: 80\n protocol: HTTP\nstatus:\n addresses:\n - type: IPAddress\n value: 172.31.200.0\n
In the example the api
listener has a hostname myapp.mn.hcpapps.net
that matches a hosted zone being managed by the provider referenced my-route53-credentials
in the DNSPolicy. As the simple
routing strategy is set in the DNSPolicy a DNSRecord resource with the following contents will be created:
apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nmetadata:\n name: prod-web-api\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
The providerRef
is included in the DNSRecord to allow the dns record controller to load the appropriate provider configuration during reconciliation and create the DNS records in the dns provider service e.g. route 53.
Example 2. DNSPolicy using simple
routing strategy on multi cluster gateway
apiVersion: kuadrant.io/v1alpha2\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n routingStrategy: simple\n
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: Gateway\nmetadata:\n name: prod-web\n namespace: my-gateways\nspec:\n gatewayClassName: kuadrant-multi-cluster-gateway-instance-per-cluster\n listeners:\n\n - allowedRoutes:\n namespaces:\n from: All\n name: api\n hostname: \"myapp.mn.hcpapps.net\"\n port: 80\n protocol: HTTP\nstatus:\n addresses:\n - type: kuadrant.io/MultiClusterIPAddress\n value: 172.31.200.0\n - type: kuadrant.io/MultiClusterIPAddress\n value: 172.31.201.0\n
Similar to example 1, except here the Gateway is a multi cluster gateway that has had its status updated by the Gateway
controller to include kuadrant.io/MultiClusterIPAddress
type addresses. As the simple
routing strategy is set in the DNSPolicy a DNSRecord resource with the following contents will be created:
apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nmetadata:\n name: prod-web-api\n namespace: my-gateways\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n - 172.31.201.0\n
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#api-updates","title":"API Updates","text":"DNSPolicy:
- new providerRef field
spec.providerRef
- new routingStrategy field
spec.routingStrategy
- new api version
v1alpha2
DNSRecord:
spec.managedZone
replaced with spec.providerRef
- new zoneID field
spec.zoneID
- new api version
v1alpha2
ManagedZone:
- ManagedZone API wil be removed and no longer supported as part of MGC/Kuadrant.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnspolicyspecproviderref","title":"DNSPolicy.spec.providerRef","text":"The providerRef
field is mandatory and contains a reference to a secret containing provider credentials.
- `spec.providerRef.name` - name of the provider resource.\n
A DNSPolicy
referencing a providerRef secret will expect that secret to exist in the same namespace. The expected contents of the secrets data is comparable to the dnsProviderSecretRef
used by ManageZones.
apiVersion: v1\nkind: Secret\nmetadata:\n name: aws-credentials\ntype: kuadrant.io/aws\ndata:\n AWS_ACCESS_KEY_ID: \"foo\"\n AWS_SECRET_ACCESS_KEY: \"bar\"\n CONFIG:\n zoneIDFilter:\n\n - Z04114632NOABXYWH93QUl\n
The CONFIG
section of the secrets data will be added to allow provider specific configuration to be stored alongside the providers credentials and can be used during the instantiation of the provider client, and during any provider operations. The above for example would use the zoneIDFilter
value to limit what hosted zones this provider is allowed to update.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnspolicyspecroutingstrategysimpleweightedgeo","title":"DNSPolicy.spec.routingStrategy[simple|weightedGeo]","text":"The routingStrategy
field is mandatory and dictates what kind of dns record structure the policy will create. Two routing strategy options are allowed simple
or weightedGeo
.
A reconciliation of DNSPolicy processes the target gateway and creates a DNSRecord per listener that is supported by the currently configured provider(hostname matches the hosted zones accessible with the credentials and config). The routing strategy used will determine the contents of the DNSRecord resources Endpoints array.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#simple","title":"simple","text":"apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
Simple creates a single endpoint for an A record with multiple targets. Although intended for use in a single cluster context a simple routing strategy can still be used in a multi-cluster environment (OCM hub). In this scenario each clusters address will be added to the targets array to create a multi answer section in the dns response.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#weightedgeo","title":"weightedGeo","text":"apiVersion: kuadrant.io/v1alpha2\nkind: DNSRecord\nspec:\n providerRef:\n name: my-route53-credentials\n endpoints:\n\n - dnsName: myapp.mn.hcpapps.net\n recordTTL: 300\n recordType: CNAME\n targets:\n - lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: lb-4ej5le.myapp.mn.hcpapps.net\n providerSpecific:\n - name: geo-code\n value: '*'\n recordTTL: 300\n recordType: CNAME\n setIdentifier: default\n targets:\n - default.lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: default.lb-4ej5le.myapp.mn.hcpapps.net\n providerSpecific:\n - name: weight\n value: \"120\"\n recordTTL: 60\n recordType: CNAME\n setIdentifier: lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n targets:\n - lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n - dnsName: lrnse3.lb-4ej5le.myapp.mn.hcpapps.net\n recordTTL: 60\n recordType: A\n targets:\n - 172.31.200.0\n
WeightedGeo creates a more complex set of endpoints which use a combination of weighted and geo routing strategies. Although intended for use in a multi-cluster environment (OCM hub) it will still be possible to use it in a single cluster context. In this scenario the record structure described above would be created for the single cluster.
This is the current default for DNSPolicy in a multi-cluster environment (OCM hub) and more details about it can be found in the original DNSPolicy rfc.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnsrecordspecproviderref","title":"DNSRecord.spec.providerRef","text":"More details of providerRef
found in DNSPolicy.spec.providerRef
The DNSRecord API is updated to remove the managedZone
reference in favour of directly referencing the providerRef
credentials instead. The DNSRecord reconciliation will be unchanged except for loading the provider client from providerRef
credentials.
The DNSPolicy reconciliation will be updated to remove the requirement for a ManagedZone resource to be created before a DNSPolicy can create dns records for it, instead it will be replaced in favour of just listing available zones directly in the currently configured dns provider. If no matching zone is found, no DNSRecord will be created.
There is a potential for a DNSRecord to be created successfully, but then a provider updated to remove access. In this case it is the responsibility of the DNSPolicy controller to report appropriate status back to the policy and target resource about the failure to process the record. More details on how status will be reported can be found in rfc-0004
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#dnsrecordspeczoneid","title":"DNSRecord.spec.zoneID","text":"The zoneID
field is mandatory and contains the provider specific id of the hosted zone that this record should be published into.
The DNSRecord reconciliation will use this zone when creating/updating or deleting endpoints for this record set.
The zoneID
should not change after being selected during initial creation and as such will be marked as immutable.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#prior-art","title":"Prior art","text":"ExternalDNS
- Uses annotations on the target Gateway as opposed to a proper API.
- Requires access to the HTTP route resources.
- Supports only a single provider per external dns instance.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#unresolved-questions","title":"Unresolved questions","text":"When a provider is configured using a kind not supported by the DNSPolicy
controller e.g. ExternalDNS
we will be relying on an external controller to correctly update the status of any DNSRecord resources created by our policy. This may have a negative impact on our ability to correctly report status back to the target resource.
When using a weightedGeo routing strategy in a single cluster context it is not expected that this will offer multi cluster capabilities without the use of OCM. Currently, it is expected that if you want to create a recordset that contains the addresses of multiple clusters you must use an OCM hub.
"},{"location":"architecture/rfcs/0005-single-cluster-dnspolicy/#future-possibilities","title":"Future possibilities","text":"The ability to support other kubernetes dns controllers such as ExternalDNS would potentially allow us to contribute to some of these projects in the area of polices for dns management of Gateway resources in kubernetes.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/","title":"Configuration of Kuadrant Sub Components","text":" - Feature Name:
sub-components-config
- Start Date: 2023-09-11
- RFC PR: Kuadrant/architecture#25
- Issue tracking: Kuadrant/kuadrant-operator#163
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#summary","title":"Summary","text":"Enable configuration of sub components of Kuadrant from a centralized location, namely the Kuadrant CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#motivation","title":"Motivation","text":"The initial request comes from MGC to configure Redis for Limitador by the following issue #163. MGC's current work around is to update the Limitador CR after the deployment with the configuration setting for Redis Instance. This change would allow for the configuration of sub components before the Kuadrant is deployed.
This reduces the number of CRs that users of Kuadrant are required to modify to get the installation they require. The sub components CRs (Authorino, Limitador) never have to be modified by a Kuadrant user (and should never be modified by a Kuadrant User).
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#guide-level-explanation","title":"Guide-level explanation","text":"As the Kuadrant operator would be responsible for reconciling these configurations into the requested components, restrictions and limitations can be placed on the components which maybe allowed in a standalone installation. An example in this space is the disk storage for Limitador which is a new feature and the Kuadrant installation may not want to support it till there is a proven track record for the feature.
For existing Kuadrant Users this may be a possible breaking changes if those users manually configure the Kuadrant sub components via their CRs. A guide can be created to help migrate the users configurations to the Kuadrant CR. This guide can be part of the release notes and/or possibly released before the release of Kuadrant.
The deployment configuration for each component can be placed in the Kuadrant CR. These configurations are then reconciled into the CRs for each component. Only the options below are exposed in the Kuadrant CR. All fields in the spec are optional.
apiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n name: kuadrant-sample\nspec:\n limitador:\n afffinity: ...\n listener: ...\n pdb: ...\n replicas: ...\n resourceRequirements: ...\n storage: ...\n authorino:\n evaluatorCacheSize: ...\n healthz: ...\n listener: ...\n logLevel: ...\n metrics: ...\n oidcServer: ...\n replicas: ...\n tracing: ...\n volumes: ...\nstatus:\n ...\n
The Kuadrant operator will watch for changes in the Authorino and Limitador CRs, reconciling back any changes that a user may do to these configurations. How ever Kuadrant operator will not reconcile fields that are given above. An example of this is the image
field on the Authorino CR. This field allows a user to set the image that Authorino is deployed with. The feature is meant for dev and testing purposes. If a user wishes to use a different image, they can. Kuadrant assumes they know what they are doing but requires the user to set the change on the component directly.
Only the sub-component operators will be responsible for acting on the configurations passed from the Kuadrant CR to the sub-component CRs. This ensures no extra changes are required in the sub operators to meet the needs of Kuadrant.
Status errors related to the configuration of the sub components should be reported back to the Kuadrant CR. The error messages in Kuadrant should state which components are currently having issues and which resource to review for more details.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#reference-level-explanation","title":"Reference-level explanation","text":"All the fields in the Authorino and Limitador CRs that are configurable in the Kuadrant CR are optional and have sound defaults. Kuadrant needs to remain installable with out having to set any spec in the Kuadrant CR.
The Kuadrant operator should only reconcile the spec that is given. This means that if the user states the number of replicas to be used in one of the components, only the replicas field for that component should be reconciled. As the other fields would be blank at this stage, blank fields would not be reconciled to the component CR. This behaviour achieves a few things: component controllers define the defaults to be used in the components; optional fields in the component CRs never get set with blank values (blank values in the component CR could override the defaults of the components, causing unexpected behaviour); and existing Kuadrant users may already have custom fields set in the component CRs, so by only reconciling the fields set in the Kuadrant CR, users have time to migrate their custom configuration from the component CR to the Kuadrant CR.
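As a minimal sketch of this behaviour (values illustrative; the Limitador CR's apiVersion is an assumption based on the Limitador Operator), a Kuadrant CR that sets only the Limitador replicas would result in exactly one field being reconciled into the Limitador CR:
apiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n  name: kuadrant-sample\nspec:\n  limitador:\n    replicas: 3  # the only field set, therefore the only field reconciled\n
apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador\nspec:\n  replicas: 3  # reconciled from the Kuadrant CR; unset optional fields are left untouched, never blanked\n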
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#fields-to-reconcile","title":"Fields to reconcile","text":"Fields being reconcile can be classified into different groups. These classifications are based around the tasks a user is achieve.
- Kubernetes native, settings that affect how Kubernetes handles the resource.
- Observability, configuration settings that allow insights into how the applications are operating. This can be Kubernetes native or external tooling.
- Application Settings, settings targeting the application and how it connects to external services.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#authorino-spec","title":"Authorino Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#kubernetes-native","title":"Kubernetes native","text":" - replicas
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#observability","title":"Observability","text":" - healthz
- logLevel
- metrics
- tracing
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#application-settings","title":"Application Settings","text":" - evaluatorCacheSize
- listener
- oidcServer
- volumes
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#limitador-spec","title":"Limitador Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#kubernetes-native_1","title":"Kubernetes native","text":" - afffinity
- pdb
- replicas
- resourceRequirements
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#application-settings_1","title":"Application Settings","text":" - listener
- storage
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#fields-not-reconciled","title":"Fields not reconciled","text":"There are a number of fields in both Authorino and Limitador that are not reconciled. Reasons for doing this are:
It is better to start with a subset of features and expand to include more at a later date. Removing feature support is far harder than adding it.
There are four classifications the unreconciled fields fall into.
- Deprecated, fields that are deprecated and/or have plans to be removed from the spec in the future.
- Unsupported, the features would have hard-coded or expected defaults in the Kuadrant operator. Work would be required to allow the custom configuration options.
- Dev/Testing focused, features that should only be used during development & testing and are not recommended for production. The defaults for these fields are the production recommendations.
- Reconciled by others, this mostly affects Limitador as the deployment configuration and runtime configuration are in the same CR. In the case of Kuadrant the runtime configuration for Limitador is added via the RateLimitingPolicy CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#authorino-spec_1","title":"Authorino Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unsupported","title":"Unsupported","text":" - clusterWide
- authConfigLabelSelectors
- secretLabelSelectors
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#devtesting-focused","title":"Dev/Testing focused","text":" - image
- imagePullPolicy
- logMode
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#limitador-spec_1","title":"Limitador Spec","text":""},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unsupported_1","title":"Unsupported","text":" - RateLimitHeaders
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#reconciled-by-others","title":"Reconciled by others","text":" - Limits
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#deprecated","title":"Deprecated","text":" - version
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#drawbacks","title":"Drawbacks","text":"As the Kuadrant CR spec will be a sub set of the features that can be configured in the sub components spec, extra maintenances will be required to ensure specs are in sync.
New features of a component will not be accessible in Kuadrant initially. This is both a pro and a con.
Documentation becomes harder: each sub component should document its own features, but in Kuadrant the user does not configure the feature in the sub component. This risks confusing new users.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#rationale-and-alternatives","title":"Rationale and alternatives","text":" - Why is this design the best in the space of possible designs?
- What other designs have been considered and what is the rationale for not choosing them?
- What is the impact of not doing this?
One alternative that was considered was allowing the user to bring their own Limitador instance by stating which Limitador CR Kuadrant should use. A major issue with this approach was knowing which limits the user had configured and which limits Kuadrant configured. Sharing global counters is a valid reason to want to share Limitador instances. However, in this case Limitador would not be using one replica and therefore would have a back-end storage configured. It is the back-end storage that needs to be shared across instances, and this can be done by adding the configuration in the Kuadrant CR.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#prior-art","title":"Prior art","text":"Discuss prior art, both the good and the bad, in relation to this proposal. A few examples of what this can include are:
- Does another project have a similar feature?
- What can be learned from it? What's good? What's less optimal?
- Papers: Are there any published papers or great posts that discuss this? If you have some relevant papers to refer to, this can serve as a more detailed theoretical background.
This section is intended to encourage you as an author to think about the lessons from other attempts, successful or not, and to provide readers of your RFC with a fuller picture.
Note that while precedent set by other projects is some motivation, it does not on its own motivate an RFC.
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#unresolved-questions","title":"Unresolved questions","text":" - What parts of the design do you expect to resolve through the RFC process before this gets merged?
- What parts of the design do you expect to resolve through the implementation of this feature before stabilization?
- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC?
- Is there a need to add validation on the configuration?
- If a valid configuration is added to the Kuadrant CR and passed to the sub component's CR, but there is an error setting up the configuration, how is this error reported back to the user? An example of this is configuring Redis as the back-end in Limitador, which requires stating the name and namespace of a configmap. The Limitador CR will have an error if the configmap does not exist, and as the user only configures the Kuadrant CR, this error may go unnoticed. This is only one example, but there is a need for good error reporting back to the user, where they would expect to see the error.
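To make the Redis example above concrete, a hypothetical sketch (the storage field names are illustrative, extending the Kuadrant CR spec shown in the guide-level explanation, and are not prescribed by this RFC):
apiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n  name: kuadrant-sample\nspec:\n  limitador:\n    storage:\n      redis:\n        configSecretRef:  # hypothetical reference to the resource holding the Redis connection details\n          name: redis-config\n          namespace: kuadrant-system  # if the referenced resource does not exist, the error surfaces in the Limitador CR, not in the Kuadrant CR the user edited\n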
"},{"location":"architecture/rfcs/0006-kuadrant_sub_components_configurations/#future-possibilities","title":"Future possibilities","text":"Think about what the natural extension and evolution of your proposal would be and how it would affect the platform and project as a whole. Try to use this section as a tool to further consider all possible interactions with the project and its components in your proposal. Also consider how this all fits into the roadmap for the project and of the relevant sub-team.
This is also a good place to \"dump ideas\", if they are out of scope for the RFC you are writing but otherwise related.
Note that having something written down in the future-possibilities section is not a reason to accept the current or a future RFC; such notes should be in the section on motivation or rationale in this or subsequent RFCs. The section merely provides additional information.
The implementation stated here allows the user to state spec fields in the component CRs or the Kuadrant CR (Kuadrant CR overrides the component CRs). A future possibility would be to warn the user if they add configuration to the components CRs that would get overridden if the same spec fields are configured in the Kuadrant CR.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/","title":"RFC - Policy Sync","text":" - Feature Name:
policy_sync_v1
- Start Date: 2023-10-10
- RFC PR: Kuadrant/architecture#0000
- Issue tracking: https://github.com/Kuadrant/architecture/issues/26
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#summary","title":"Summary","text":"The ability for the Multicluster Gateway Controller to sync policies defined in the hub cluster downstream to the spoke clusters, therefore allowing all policies to be defined in the same place. These policies will be reconciled by the downstream policy controller(s).
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#nomenclature","title":"Nomenclature","text":" -
Policy: When refering to a Policy, this document is refering to a Gateway API policy as defined in the Policy Attachment Model. The Multicluster Gateway Controller relies on OCM as a Multicluster solution, which defines its own unrelated set of Policies and Policy Framework. Unless explicitely mentioned, this document refers to Policies as Gateway API Policies.
-
Policy overriding: The concept of policy overriding is mentioned in this document. It refers to the proposed ability of the downstream Gateway implementation to prioritise downstream Policies against synced Policies in case of conflicts.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#motivation","title":"Motivation","text":"Currently, Kuadrant's support for the Policy Attachment Model can be divided in two categories:
- Policies targeting the Multicluster Gateway, defined in the hub cluster and reconciled by the Multicluster Gateway Controller
- Policies targeting the downstream Gateway, defined in the spoke clusters and reconciled by the downstream Gateway controllers.
In a realistic multicluster scenario where multiple spoke clusters are present, the management of these policies can become tedious and error-prone, as policies have to be defined in the hub cluster, as well as replicated in the multiple spoke clusters.
As Kuadrant users:
- Gateway-admin has a set of homogeneous clusters and needs to apply per cluster rate limits across the entire set.
- Platform-admin with a set of clusters with rate limits applied needs to change rate limit for one particular cluster.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#guide-level-explanation","title":"Guide-level explanation","text":"The policy sync feature will allow a gateway-admin to configure, via GatewayClass parameters, a set of Policy GVRs to be synced by the Multicluster Gateway Controller.
The policiesToSync field in the parameters defines those GVRs. For example, in order to configure the controller to sync AuthPolicies:
\"policiesToSync\": [\n {\n \"group\": \"kuadrant.io\",\n \"version\": \"v1beta1\",\n \"resource\": \"authpolicies\" \n }\n]\n
The support for resources that the controller can sync is limited by the following:
- The controller ServiceAccount must have permission to watch, list, and get the resource to be synced
- The resource must implement the Policy schema: have a .spec.targetRef field
When a Policy is configured to be synced in a GatewayClass, the Multicluster Gateway Controller starts watching events on the resources, and propagates changes by placing the policy in the spoke clusters, with the following mutations:
- The TargetRef of the policy is changed to reference the downstream Gateway
- The kuadrant.io/policy-synced annotation is set
The upstream policy is annotated with a reference to the name and namespace of the downstream policies:
annotations:\n \"kuadrant.io/policies-synced\": \"[{\\\"cluster\\\": \\\"...\\\", \\\"name\\\": \\\"...\\\", \\\"namespace\\\": \\\"...\\\"}]\"\n
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#process-overview","title":"Process overview","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#dynamic-policy-watches","title":"Dynamic Policy watches","text":"The Multicluster Gateway Controller reconciles parameters referenced by the GatewayClass of a Gateway. A new field is added to the parameters that allows the configuration of a set of GVRs of Policies to be synced.
The GatewayClass reconciler validates that:
- The GVRs reference existing resource definitions
- The GVRs reference resources that implement the Policy schema.
Validation failures are reported as part of the status of the GatewayClass
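A hedged sketch of where this configuration could live (the controller name and the parameters object are hypothetical; the parametersRef mechanism itself is standard Gateway API):
apiVersion: gateway.networking.k8s.io/v1beta1\nkind: GatewayClass\nmetadata:\n  name: kuadrant-multi-cluster-gateway\nspec:\n  controllerName: kuadrant.io/mgc-gw-controller  # hypothetical controller name\n  parametersRef:  # points at the object carrying the policiesToSync parameters shown above\n    group: \"\"\n    kind: ConfigMap\n    name: gateway-params\n    namespace: multi-cluster-gateways\n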
The Gateway reconciler sets up dynamic watches to react to events on the configured Policies, calling the PolicySyncer component with the updated Policy as well as the associated Gateway.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#policysyncer-component","title":"PolicySyncer component","text":"The PolicySyncer component is in charge of reconciling Policy watch events to apply the necessary changes and place the Policies in the spoke clusters.
This component is injected in the event source and called when a change is made to a hub Policy that has been configured to be synced.
The PolicySyncer implementation uses OCM ManifestWorks to place the policies in the spoke clusters. Through the ManifestWorks, OCM makes it possible to (see the sketch after this list):
- Place the Policy in each spoke cluster
- Report the desired status back to the hub using JSON feedback rules
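A hedged sketch of the kind of ManifestWork the PolicySyncer might create (policy names and contents illustrative; the ManifestWork and feedback rules schema are standard OCM):
apiVersion: work.open-cluster-management.io/v1\nkind: ManifestWork\nmetadata:\n  name: sync-authpolicy-my-policy\n  namespace: spoke-cluster-1  # ManifestWorks live in the managed cluster's namespace on the hub\nspec:\n  workload:\n    manifests:\n      - apiVersion: kuadrant.io/v1beta1\n        kind: AuthPolicy\n        metadata:\n          name: my-policy\n          namespace: my-app\n          annotations:\n            kuadrant.io/policy-synced: \"true\"  # set as part of the mutations described earlier\n        spec: {}  # targetRef already mutated to reference the downstream Gateway\n  manifestConfigs:\n    - resourceIdentifier:\n        group: kuadrant.io\n        resource: authpolicies\n        name: my-policy\n        namespace: my-app\n      feedbackRules:  # report status values back to the hub; scalar JSON paths are the safest choice\n        - type: JSONPaths\n          jsonPaths:\n            - name: observedGeneration\n              path: .status.observedGeneration\n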
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#policy-hierarchy","title":"Policy Hierarchy","text":"In order to avoid conflict with Policies created directly in the spoke clusters, a hierarchy must be defined to prioritise those Policies.
The controller will set the kuadrant.io/policy-synced annotation on the policy when placing it in the spoke cluster.
The Kuadrant operator will be aware of the presence of this annotation, and, in case of conflicts, override Policies that contain this annotation. When a policy is overridden due to conflicts, the Enforced status will be set to False, with the reason being Overridden and a human-readable message explaining why the policy was overridden. See the Policy Status RFC.
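A hedged sketch of how the status of an overridden synced policy could look (condition fields illustrative, following the Policy Status RFC):
status:\n  conditions:\n    - type: Enforced\n      status: \"False\"\n      reason: Overridden\n      message: \"AuthPolicy is overridden by a conflicting policy declared directly in the spoke cluster\"  # wording illustrative\n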
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#drawbacks","title":"Drawbacks","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#third-party-policy-support","title":"Third party Policy support","text":"In order for a Policy to be supported for syncing, the MGC must have permissions to watch/list/get the resource, and the implementation of the downstream Gateway controller must be aware of the policy-synced
annotation.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#alternatives","title":"Alternatives","text":"Different technology stacks are available to sync resources across clusters. However, adoption of these technologies for the purpose of the goal this RFC intends to achieve, implies adding another dependency to the current stack, with the cost of added complexity and maintainance effort.
The MGC currently uses OCM to place Gateways across clusters. Relying on OCM for the purpose of placing Policies is the most straightforward alternative from a design and implementation point of view.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#consequences-of-not-implementing","title":"Consequences of not implementing","text":"Gateway-admins will have no centralized system for handling spoke-level policies targeting a gateway created there from the hub.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#ocms-policy-framework-will-not-be-used-to-complete-this-objective","title":"OCMs Policy Framework will not be used to complete this objective:","text":"OCMs Policy Framework is a system designed to make assertions about the state of a spoke, and potentially take actions based on that state, as such it is not a suitable replacement for manifestworks in the case of syncing resources to a spoke.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#potential-migration-from-manifestworks-to-manifestworkreplicasets","title":"Potential migration from ManifestWorks to ManifestWorkReplicaSets","text":"ManifestWorkPeplicaSets may be a future improvement that the MGC could support to simplify the placement of related resources, but beyond the scope of this RFC.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#prior-art","title":"Prior art","text":"No applicable prior art.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#unresolved-questions","title":"Unresolved questions","text":""},{"location":"architecture/rfcs/0007-policy-sync-v1/#status-reporting","title":"Status reporting","text":"While the controller can assume common status fields among the Policies that it syncs, there might be a scenario where certain policies use custom status fields that are not handled by the controller. In order to support this, two alternatives are identified:
- Configurable rules. An extra field is added in the GatewayClass params that configures the policies to sync, to specify custom fields that the controller must propagate back from the spokes to the hub.
- Hard-coded support. The PolicySync component can identify the Policy type and select which extra status fields are propagated.
"},{"location":"architecture/rfcs/0007-policy-sync-v1/#future-possibilities","title":"Future possibilities","text":"If OCMs Policy Framework is updated to enable syncing of resources status back to the hub, it could be an opportunity to refactor the MGC to use this framework in place of the current approach of creating ManifestWorks directly.
This system could mutate over time to dynamically sync more CRDs than policies to spoke clusters.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/","title":"Kuadrant Release Process","text":" - Feature Name:
kuadrant-release-process
- Start Date: 2024-01-11
- RFC PR: Kuadrant/architecture#46
- Issue tracking: Kuadrant/architecture#59
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#summary","title":"Summary","text":"Kuadrant is a set of components whose artifacts are built and delivered independently. This RFC aims to define every aspect of the event of releasing a new version of the whole, in terms of versioning, cadence, communication, channels, handover to other teams, etc.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#motivation","title":"Motivation","text":"At the time being, there's no clear process nor guidelines to follow when releasing a new version of Kuadrant, which leads to confusion and lack of transparency. We are currently relying on internal communication and certain people in charge of the release process, which is not ideal.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#guide-level-explanation","title":"Guide-level explanation","text":"First, we need to define what releasing Kuadrant means, in a clear and transparent way that communicates to the community what's happening and what to expect. The Kuadrant suite is composed of several components, each of them with its own set of artifacts and versioning scheme. Defining the release process of the whole suite is a complex task, and it's not only about the technical details of releasing the components, but also about the communication and transparency with the community, the definition of the frequency of the releases, and when it's ready to be handover to other teams like QA. This section aims to provide guidelines for the different aspects of the release process.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#components-and-versioning","title":"Components and versioning","text":"The set of components that are part of the Kuadrant suite are the following:
- Authorino: Kubernetes-native authorization service for tailor-made Zero Trust API security.
- Authorino Operator: A Kubernetes Operator to manage Authorino instances.
- Limitador: A generic rate-limiter written in Rust.
- Limitador Operator: A Kubernetes Operator to manage Limitador deployments.
- Wasm Shim: A Proxy-Wasm module written in Rust, acting as a shim between Envoy and Limitador.
- Multicluster Gateway Controller: Provides multi-cluster connectivity and global load balancing.
- DNS Operator: A Kubernetes Operator to manage DNS in single and multi-cluster environments.
- Kuadrant Operator: The Operator to install and manage the lifecycle of the Kuadrant components deployments. Example alerts and dashboards are also included as optional.
- kuadrantctl: A CLI tool for managing Kuadrant configurations and resources.
Each of them needs to be versioned independently, following Semantic Versioning. At the time of cutting a release for any of them, it's important to keep in mind which section of the version to bump. Given a version number MAJOR.MINOR.PATCH, increment the:
- MAJOR version when you make incompatible API changes
- MINOR version when you add functionality in a backward compatible manner
- PATCH version when you make backward compatible bug fixes
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
A more detailed explanation of the versioning scheme can be found in the Semantic Versioning website.
By releasing a new version of Kuadrant, we mean releasing the set of components with their corresponding semantic versions, some of them perhaps freshly released, others still carrying the version from a previous release, with the version of the Kuadrant Operator being the one that defines the version of the whole suite.
Kuadrant Suite vx.y.z = Kuadrant Operator vx.y.z + Authorino Operator va.b.c + Limitador Operator vd.e.f + DNS Operator vg.h.i + MGC Controller vj.k.l + Wasm Shim vm.n.o\n
The technical details of how to release each component are out of the scope of this RFC and could be found in the Kuadrant components CI/CD RFC.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#qa-sanity-check","title":"QA Sanity Check","text":"Probably the most important and currently missing step in the release process is the green flagging from the Quality Assurance (QA) team. The QA team is responsible for testing the different components of the Kuadrant suite, and they need to be aware of the new version of the suite that is going to be released, what are the changes that are included, bug fixes and new features in order they can plan their testing processes accordingly. This check is not meant to be a fully fledged assessment from the QA team when it's handover to them, it's aimed to not take more than 1-2 days, and ideally expected to be fully automated. This step will happen once the release candidate has no PRs pending to be merged, and it has been tested by the Engineering team. The QA team should work closely to the engineering throughout the process, both teams aiming for zero handover time and continuous delivery mindset, so immediate testing can be triggered on release candidates once handed over. This process should happen without the need of formal communication between the teams or any overhead in general, but by keeping constant synergy between quality and product engineering instead.
There is an ideal time to hand over to the QA team for testing, especially since we are using GitHub for orchestration, we could briefly define it in the following steps:
- Complete Development Work: The engineering team completes their work included in the milestone.
- Create Release Candidate: The engineering team creates Release Candidate builds and manifests for all components required for the release
- Flagging/Testing: The QA team does the actual assertion/testing of the release candidate, checking for any obvious bugs or issues. QA then reports all the bugs as GitHub issues and communicates testing status back publicly on Slack and/or email.
- Iterate: Based on the feedback from the QA team, the Engineering team makes any necessary adjustments and repeats the process until the release candidate is deemed ready for production.
- Publish Release: Once QA communicates that the testing has been successfully finished, the engineering team will publish the release both on Github and in the corresponding registries, updates documentation for the new release, and communicates it to all channels specified in Communication section.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#cadence","title":"Cadence","text":"Once the project is stable enough, and its adoption increases, the community will be expecting a certain degree of commitment from the maintainers, and that includes a regular release cadence. The frequency of the releases of the different components could vary depending on the particular component needs. However, the Kuadrant Operator it's been discussed in the past that it should be released every 3-4 weeks initially, including the latest released version of every component in the suite. There's another RFC that focuses on the actual frequency of each component, one could refer to the Kuadrant Release Cadence RFC.
There are a few reasons for this:
- Delivering Unparalleled Value to Users: Regular releases can provide users with regular updates and improvements. These updates can include new features and essential bug fixes, thus enhancing the overall value delivered to the users.
- Resource Optimization: By releasing software at regular intervals, teams can align their activities with available resources and environments, ensuring optimal utilization. This leads to increased efficiency in the deployment process and reduces the risk of resource wastage.
- Risk Management: Regular releases can help identify and fix issues early, reducing the risk of major failures that could affect users.
- Feedback Cycle: Regular releases allow for quicker feedback cycles. This means that any issues or improvements identified by users can be addressed promptly, leading to a more refined product over time.
- Synchronization: Regular releases can help synchronize work across different teams or departments, creating a more reliable, dependable solution development and delivery process.
- Reduced Complexity: Managing a smaller number of releases can reduce complexity. For example, having many different releases out in the field can lead to confusion and management overhead.
By committing to a release cadence, software projects can benefit from improved efficiency, risk management, faster feedback cycles, synchronization, and reduced complexity.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#repositories-and-hubs","title":"Repositories and Hubs","text":"Every component in Kuadrant has its own repository, and the source code is hosted in GitHub, mentioned in the previous section. However, the images built and manifests generated are hosted in different registries, depending on the component. The following table shows the different registries used by each component:
| Component | Artifact | Registry / Hub |
| --- | --- | --- |
| Authorino | authorino images | Quay.io |
| Authorino Operator | authorino-operator images | Quay.io |
| Authorino Operator | authorino-operator-bundle images | Quay.io |
| Authorino Operator | authorino-operator-catalog images | Quay.io |
| Authorino Operator | authorino-operator manifests | OperatorHub.io |
| Limitador | limitador server images | Quay.io |
| Limitador | limitador crate | Crates.io |
| Limitador Operator | limitador-operator images | Quay.io |
| Limitador Operator | limitador-operator-bundle images | Quay.io |
| Limitador Operator | limitador-operator-catalog images | Quay.io |
| Limitador Operator | limitador-operator manifests | OperatorHub.io |
| Wasm Shim | wasm-shim images | Quay.io |
| Multicluster Gateway Controller | multicluster-gateway-controller images | Quay.io |
| Multicluster Gateway Controller | multicluster-gateway-controller-bundle images | Quay.io |
| Multicluster Gateway Controller | multicluster-gateway-controller-catalog images | Quay.io |
| DNS Operator | dns-operator images | Quay.io |
| DNS Operator | dns-operator-bundle images | Quay.io |
| DNS Operator | dns-operator-catalog images | Quay.io |
| Kuadrant Operator | kuadrant-operator images | Quay.io |
| Kuadrant Operator | kuadrant-operator-bundle images | Quay.io |
| Kuadrant Operator | kuadrant-operator-catalog images | Quay.io |
| Kuadrant Operator | kuadrant-operator manifests | OperatorHub.io |
| Kuadrant Operator | kuadrant-operator source (includes example dashboards and alerts) | Github Releases |
| kuadrantctl | kuadrantctl CLI | Github Releases |
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#documentation","title":"Documentation","text":"It's important to note that keeping the documentation up to date is a responsibility of the component maintainers, and it needs to be done before releasing a new version of the component. Keeping clear and up-to-date documentation is crucial for the success of the project.
The documentation for the Kuadrant suite is compiled and available on the Kuadrant website. One can find the source of the documentation within each component repository, in the docs directory. However, making this information available on the website is a manual process, and should be done by the maintainers of the project. The process of updating the documentation is simple and consists of the following steps:
- Update the documentation in the corresponding component repository.
- Follow the instructions in https://github.com/Kuadrant/docs.kuadrant.io/ to update the docs pointers to the tag or branch of the component repository that contains the updated documentation.
- Once the changes are merged to main, the workflow that updates the website will be triggered, and the documentation will be updated.
- If for some reason it's needed to trigger the workflow manually, one can do it from the GitHub Actions tab in docs.kuadrant.io (Actions > ci > Run Workflow).
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#communication","title":"Communication","text":"Another important aspect of releasing a new version of the Kuadrant suite is the communication with the community and other teams within the organization. A few examples of the communication channels that need to be updated are:
- Changelog generation
- Release notes
- Github Release publication
- Slack channel in Kubernetes workspace
- Blog post, if applicable
- Social media, if applicable
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#rationale-and-alternatives","title":"Rationale and alternatives","text":"The alternative to the proposal is to keep the current process, which is not ideal and leads to confusion and lack of transparency.
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#prior-art","title":"Prior art","text":"There's been an organically grown process for releasing new versions of the Kuadrant suite, which is not documented and it's been changing over time. However, there are some documentation for some of the components, worth mentioning:
- Authorino release process
- Authorino Operator release process
- Limitador release process
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#unresolved-questions","title":"Unresolved questions","text":" - What would be Kuadrant support policy?
- How many versions are we going to back-port security and bug fixes to?
- What other teams need to be involved in the release process?
"},{"location":"architecture/rfcs/0008-kuadrant-release-process/#future-possibilities","title":"Future possibilities","text":"Once the release process is accepted and battle-tested, we could aim to automate the process as much as possible.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/","title":"Defaults & Overrides","text":" - Feature Name:
defaults-and-overrides
- Start Date: 2024-02-15
- RFC PR: Kuadrant/architecture#58
- Issue tracking: Kuadrant/kuadrant-operator#431
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#summary","title":"Summary","text":"This is a proposal for extending the Kuadrant Policy APIs to fully support use cases of Defaults & Overrides (D/O) for Inherited Policies, including the base use cases of full default and full override, and more specific nuances that involve merging individual policy rules (as defaults or overrides), declaring constraints and unsetting defaults.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#motivation","title":"Motivation","text":"As of Kuadrant Operator v0.6.0, Kuadrant policy resources that have hierarchical effect across the tree of network objects (Gateway, HTTPRoute), or what is known as Inherited Policies, provide only limited support for setting defaults and no support for overrides at all.
The above is notably the case of the AuthPolicy and the RateLimitPolicy v1beta2 APIs, shipped with the aforementioned version of Kuadrant. These kinds of policies can be attached to Gateways or to HTTPRoutes, with cascading effects through the hierarchy that result in one effective policy per gateway-route combination. This effective policy is either the policy attached to the Gateway or, if present, the one attached to the HTTPRoute, thus conforming with a strict case of implicit defaults set at the level of the gateway.
Enhancing the Kuadrant Inherited Policy CRDs, so the corresponding policy instances can declare defaults and overrides stanzas, is imperative:
- to provide full support for D/O along the lines proposed by GEP-713 (to be superseded by GEP-2649) of the Kubernetes Gateway API special group (base use cases);
- to extend D/O support to other derivative cases, learnt to be just as important for platform engineers and app developers who require more granular policy interaction on top of the base cases;
- to support more sophisticated hierarchies with other kinds of network objects and/or multiple policies targeting the same level of the hierarchy (possibly, in the future.)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#guide-level-explanation","title":"Guide-level explanation","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#conceptualization-and-user-story","title":"Conceptualization and User story","text":"The base use cases for Defaults & Overrides (D/O) are:
- Defaults (D): policies declared lower in the hierarchy supersede ones set (as \"defaults\") at a higher level, or \"more specific beats less specific\"
- Overrides (O): policies declared higher in the hierarchy (as \"overrides\") supersede ones set at the lower levels, or \"less specific beats more specific\"
The base cases are expanded with the following additional derivative cases and concepts:
- Merged defaults (DR): \"higher\" default policy rules that are merged into more specific \"lower\" policies (as opposed to an atomic less specific set of rules that is activated only when another more specific one is absent)
- Merged overrides (OR): \"higher\" override policy rules that are merged into more specific \"lower\" policies (as opposed to an atomic less specific set of rules that is activated fully replacing another more specific one that is present)
- Constraints (C): specialization of an override that, rather than declaring concrete values, specify higher level constraints (e.g., min value, max value, enums) for lower level values, with the semantics of \"clipping\" lower level values so they are enforced, in an override fashion, to be the boundaries dictated by the constraints; typically employed for constraining numeric values and regular patterns (e.g. limited sets)
- Unsetting (U): specialization that completes a merge default use case by allowing lower level policies to disable ("unset") individual defaults set at a higher level (as opposed to superseding those defaults with actual, more specific, policy rules with proper meaning)
Together, these concepts help solve the following user stories:
| User story | Group | Unique ID |
| --- | --- | --- |
| As a Platform Engineer, when configuring a Gateway, I want to set a default policy for all routes linked to my Gateway, that can be fully replaced with more specific ones(*). | D | gateway-default-policy |
| As a Platform Engineer, when configuring a Gateway, I want to set default policy rules (parts of a policy) for all routes linked to my Gateway, that can be individually replaced and/or expanded by more specific rules(*). | DR | gateway-default-policy-rule |
| As a Platform Engineer, when defining a policy that configures a Gateway, I want to set constraints (e.g. minimum/maximum value, enumerated options, etc) for more specific policy rules that are declared(*) with the purpose of replacing the defaults I set for the routes linked to my Gateway. | C | policy-constraints |
| As a Platform Engineer, when configuring a Gateway, I want to set a policy for all routes linked to my Gateway, that cannot be replaced nor expanded by more specific ones(*). | O | gateway-override-policy |
| As a Platform Engineer, when configuring a Gateway, I want to set policy rules (parts of a policy) for all routes linked to my Gateway, that cannot be individually replaced by more specific ones(*), but only expanded with additional more specific rules(*). | OR | gateway-override-policy-rule |
| As an Application Developer, when managing an application, I want to set a policy for my application, that fully replaces any default policy that may exist for the application at the level of the Gateway, without having to know about the existence of the default policy. | D | route-replace-policy |
| As an Application Developer, when managing an application, I want to expand a default set of policy rules set for my application at the level of the gateway, without having to refer to those existing rules by name. | D/O | route-add-policy-rule |
| As an Application Developer, when managing an application, I want to unset an individual default rule set for my application at the level of the gateway. | U | route-unset-policy-rule |
(*) declared in the past or in the future, by myself or any other authorized user.
The interactive nature of setting policies at different levels of the hierarchy and by different personas means that the following additional user stories arise. These stories are grouped here under the Observability (Ob) aspect of D/O, but are referred to as well in relation to the "Discoverability Problem" described by Gateway API.
| User story | Group | Unique ID |
| --- | --- | --- |
| As one who has read access to Kuadrant policies, I want to view the effective policy enforced at the traffic routed to an application, considering all active defaults and overrides at different policies(*). | Ob | view-effective-policy |
| As a Platform Engineer, I want to view all default policy rules that may have been replaced by more specific ones(*). | Ob | view-policy-rule-status |
| As a Policy Manager, I want to view all gateways and/or routes whose traffic is subject to enforcement of a particular policy rule referred by name. | Ob | view-policy-rule-reach |
(*) declared in the past or in the future, by myself or any other authorized user.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#writing-do-enabled-kuadrant-policies","title":"Writing D/O-enabled Kuadrant Policies","text":"Writing a Kuadrant policy enabled for Defaults & Overrides (D/O), to be attached to a network object, involves declaring the following fields at the first level of the spec:
- targetRef (required): the reference to a hierarchical network object targeted by the policy, typed as a Gateway API PolicyTargetReference or PolicyTargetReferenceWithSectionName type
- defaults: a block of default policy rules with further specification of a strategy (atomic set of rules or individual rules to be merged into lower policies), and optional conditions for applying the defaults down through the hierarchy
- overrides: a block of override policy rules with further specification of a strategy (atomic set of rules or individual rules to be merged into lower policies), and optional conditions for applying the overrides down through the hierarchy
- the bare policy rules block without further qualification as a default or override set of rules, e.g. the rules field in a Kuadrant AuthPolicy, the limits field in a RateLimitPolicy.
Between the following mutually exclusive options, either one or the other shall be used in a policy:
- defaults and/or overrides blocks; or
- the bare set of policy rules (without further qualification as either defaults or overrides.)
In case the bare set of policy rules is used, it is treated implicitly as a block of defaults.
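As a minimal sketch of this equivalence (rule names and contents elided), the two policies below would be interpreted the same way:
kind: AuthPolicy\nmetadata:\n  name: route-policy\nspec:\n  targetRef:\n    kind: HTTPRoute\n  rules:  # bare set of policy rules, with no defaults/overrides qualification...\n    authentication:\n      \"a\": {\u2026}\n
kind: AuthPolicy\nmetadata:\n  name: route-policy\nspec:\n  targetRef:\n    kind: HTTPRoute\n  defaults:  # ...is treated implicitly as an atomic block of defaults\n    rules:\n      authentication:\n        \"a\": {\u2026}\n    strategy: atomic\n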
Supporting specifying the bare set of policy rules at the first level of the spec, as an alternative to the defaults and overrides blocks, is a strategy that aims to provide:
- more natural usability, especially for those who write policies attached to the lowest level of the hierarchy supported; as well as
- backward compatibility for policies that did not support explicit D/O and later on have moved to doing so.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#inherited-policies-that-declare-an-intent","title":"Inherited Policies that declare an intent","text":"A policy that does not specify D/O fields (defaults
, overrides
) is a policy that declares an intent.
One who writes a policy without specifying defaults or overrides, but only the bare set of policy rules, may feel like declaring a Direct Policy. Depending on the state of other policies indirectly affecting the same object or not, the final outcome can be the same as writing a direct policy. This is especially true when the policy that declares the intent targets an object whose kind is the lowest kind accepted by Kuadrant in the hierarchy of network resources, and there are no other policies with lower precedence.
Nevertheless, because other policies can affect the final behavior of the target (e.g. by injecting defaults, by overriding rules, by adding more definitions beneath), policies that simply declare an intent, conceptually, are still Inherited Policies.
Compared to the inherited policy that lacks D/O blocks, these other policies affecting the behavior may be declared:
- at higher levels in the hierarchy,
- at lower levels in the hierarchy, or even
- at the same level in the hierarchy but happening to have lower precedence (if such case is allowed by the kind of policy.)
At any time, any one of these policies can be created and therefore the final behavior of a target should never be assumed to be equivalent to the intent declared by any individual policy in particular, but always collectively determined by the combination of all intents, defaults and overrides from all inherited policies affecting the target.
From GEP-2649:
If a Policy can be used as an Inherited Policy, it MUST be treated as an Inherited Policy, regardless of whether a specific instance of the Policy is only affecting a single object.
An inherited policy that simply declares an intent (i.e. without specifying D/O) will be treated as a policy that implicitly declares an atomic set of defaults, whether the policy targets higher levels in the hierarchy or lower ones. In the absence of any other conflicting policy affecting the same target, the behavior equals the defaults which equal the intent.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#inherited-policies-that-modify-an-intent","title":"Inherited Policies that modify an intent","text":"A policy that specifies D/O fields (defaults
, overrides
) is a policy explicitly declared to modify an intent.
Without any other policy with lower precedence, there is no special meaning in choosing between defaults and overrides in an inherited policy that targets an object whose kind is the lowest kind accepted by Kuadrant in the hierarchy of network resources. The sets of rules specified in these policies affect the targeted objects indistinctly, regardless of how they are qualified.
However, because other policies may occasionally be declared with lower precedence (i.e. targeting lower levels in the hierarchy or due to ordering, see Conflict Resolution), one who declares a policy to modify an intent must carefully choose between defaults and/or overrides blocks to organize the policy rules, regardless of whether the targeted object is of a kind that is the lowest kind in the hierarchy of network resources accepted by Kuadrant.
Even in the cases where no more than one policy of a kind is allowed to target a same object (1:1 relationship), and thus there should never exist two policies affecting a target from the same level of the hierarchy simultaneously (or, equivalently, a policy with lower precedence than another, both at the lowest level of the hierarchy), users must assume that this constraint may change (i.e. an N:1 relationship between policies of a kind and target may become allowed.)
In all cases, defaults and overrides must be used with the semantics of declaring rules that modify an intent.
- When an intent does not specify a rule for which there is a higher default declared, the default modifies the intent by setting the value specified by the default.
- Whether an intent specifies or omits a rule for which there is a higher override declared, the override modifies the intent by setting the value specified by the override.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#identifying-inherited-policy-kinds","title":"Identifying inherited policy kinds","text":"All Custom Resource Definitions (CRDs) that define a Kuadrant inherited policy must be labeled gateway.networking.k8s.io/policy: inherited
.
Users can rely on the presence of that label to identify policy kinds whose instances are treated as inhertied policies.
In some exceptional cases, there may be kinds of Kuadrant policies that do not specify defaults
and overrides
blocks, but that are still labeled as inherited policy kinds. Instances of these kinds of policies implicitly declare an atomic sets of defaults, similarly to described in Inherited Policies that declare an intent.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-of-do-enabled-kuadrant-policy","title":"Examples of D/O-enabled Kuadrant policy","text":"Example 1. Atomic defaults
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n strategy: atomic\n
The above is a proper Inherited Policy that sets a default atomic set of auth rules to be applied at lower objects, in case those lower objects do not have policies of their own attached at all.
The following is a slightly different example that defines auth rules to be individually merged into lower objects: each rule is evaluated one by one; if it is already defined at the "lower" (more specific) level, the lower rule takes precedence; otherwise, the default is activated.
Example 2. Merged defaults
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n strategy: merge\n
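To illustrate the merge (a hypothetical lower policy and the resulting effective policy, rule contents elided): if a route-level policy declares its own authorization rule "b", the default authentication rule "a" is still merged in, while the more specific "b" wins:
kind: AuthPolicy\nmetadata:\n  name: route-policy\nspec:\n  targetRef:\n    kind: HTTPRoute\n  rules:\n    authorization:\n      \"b\": {\u2026}\n
# resulting effective policy (computed, not a resource the user writes)\nrules:\n  authentication:\n    \"a\": {\u2026}  # merged default from the gateway policy\n  authorization:\n    \"b\": {\u2026}  # more specific rule from the route policy takes precedence\n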
Similarly, a set of overrides policy rules could be specified, instead of or alongside the defaults set of policy rules.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#atomic-vs-individually-merged-policy-rules","title":"Atomic vs. individually merged policy rules","text":"There are 2 supported strategies for applying proper Inherited Policies down to the lower levels of the herarchy:
- Atomic policy rules: the bare set of policy rules in a defaults or overrides block is applied as an atomic piece; i.e., a lower object than the target of the policy, that is evaluated to be potentially affected by the policy, also has an atomic set of rules if another policy is attached to this object; therefore, either the entire set of rules declared by the higher (less specific) policy is taken or the entire set of rules declared by the lower (more specific) policy is taken (depending on whether it's defaults or overrides), but the two sets are never merged into one.
- Merged policy rules: each individual policy rule within a defaults or overrides block is compared one to one against lower level policy rules and, when they conflict (i.e. have the same key with different values), either one or the other (more specific or less specific) is taken (depending on whether it's defaults or overrides), in a way that the final effective policy is a merge between the two policies.
Each block of defaults and overrides must specify a strategy field whose value is set to either atomic or merge. If omitted, atomic is assumed.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#level-of-granularity-of-compared-policy-rules","title":"Level of granularity of compared policy rules","text":"Atomic versus merge strategies, as a specification of the defaults
and overrides
blocks, imply that there are only two levels of granularity for comparing policies vis-a-vis.
- atomic means that the level of granularity is the entire set of policy rules within the defaults or overrides block. I.e., the policy is atomic, or, equivalently, the final effective policy will be either one indivisible ("atomic") set of rules ("policy") or the other.
- For the merge strategy, on the other hand, the granularity is that of each named policy rule, where the name of the policy rule is the key and the value is an atomic object that specifies that policy rule. The final effective policy will be a merge of two policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#matrix-of-do-strategies-and-effective-policy","title":"Matrix of D/O strategies and Effective Policy","text":"When two policies are compared to compute a so-called Effective Policy out of their sets of policy rules and given default or override semantics, plus specified atomic
or merge
strategies, the following matrix applies:
| | Atomic (entire sets of rules) | Merge (individual policy rules at a given granularity) |
| --- | --- | --- |
| Defaults | More specific entire set of rules beats less specific entire set of rules → takes all the rules from the lower policy | More specific individual policy rules beat less specific individual set of rules → compare one by one each pair of policy rules and take the lower one if they conflict |
| Overrides | Less specific entire set of rules beats more specific entire set of rules → takes all the rules from the higher policy | Less specific individual policy rules beat more specific individual set of rules → compare one by one each pair of policy rules and take the higher one if they conflict |
The order of the policies, from less specific (or "higher") to more specific (or "lower"), is determined according to the Gateway API hierarchy of network resources, based on the kind of the object targeted by the policy. The policy that sits higher in the hierarchy dictates the strategy to be applied.
For a more detailed reference, including how to resolve conflicts in case of policies targeting objects at the same level, see GEP-713's section Hierarchy and Conflict Resolution.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#unsetting-inherited-defaults","title":"Unsetting inherited defaults","text":"In some cases, it may be desirable to be able to unset, at a lower policy, a merged default that is inherited from a higher one. In fact, some inherited defaults could be harmful to an application, at the same time as they are unfeasible to remove from scope for all applications altogether, and therefore require an exception.
Unsetting defaults via specification at lower level policies gives users who own policy rules at different levels of the hierarchy the option of not having to coordinate those exceptions "offline", nor having to accept the addition of special cases (conditions) at the higher level to exempt only specific lower policies from being affected by a particular default, which otherwise would constitute a violation of the inheritance pattern, as well as the imposition of additional cognitive complexity on one who reads a higher policy with too many conditions.
Instead, users should continue to be able to declare their intents through policies, and redeem an entitlement to unset inapplicable defaults, without any leakage of lower level details upwards to the higher policies.
The option of unsetting inherited defaults is presented as part of the volition implied by the inheritance of policy rules, which are typically specified for the more general case (e.g. at the level of a gateway, for all routes), though not necessarily applicable to all special cases beneath. If enabled, this feature helps disambiguate the concept of "default", which should not be understood strictly as the option to set values that protect the system in case of lack of specialisation, but rather by its property of volition and changeability. I.e., by definition, every default policy rule is opt-out and specifies a value that is modifiable.
In contrast, a policy rule that is neither opt-out nor modifiable better fits the definition of an override, while a policy rule that is not opt-out, nor sets a concrete default value to be enforced in the lack of specialisation, defines a requirement.
Finally, for the use case where users want to set defaults that cannot be unset (though still modifiable), the very feature of unsetting defaults itself should be configurable, at least at the level of the system. This can be achieved with feature switches and policy validation, including backed by the cluster's RBAC if needed.
The capability of unsetting inherited defaults from an effective policy can be identified by the presence of the spec.unset field in a policy. The value is a list of default named policy rules to be unset.
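A minimal sketch of a lower policy redeeming this capability (rule names hypothetical; "b" would be a default merged from a higher policy):
kind: AuthPolicy\nmetadata:\n  name: route-policy\nspec:\n  targetRef:\n    kind: HTTPRoute\n  rules:\n    authentication:\n      \"a\": {\u2026}\n  unset:  # disables an individual default inherited from a higher policy\n    - \"b\"  # hypothetical name of a default rule declared at the gateway level\n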
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#conditionally-applying-do","title":"Conditionally applying D/O","text":"Users should be able to specify conditions for applying their blocks of defaults
and overrides
. These conditions aim to support exceptional cases where the blocks cannot be simply applied downwards, but rather depend on specifics found in the lower policies, while still defined in generic terms \u2013 as opposed to conditions that leak details of individual lower policies upwards.
Between a higher and a lower set of policy rules, the higher level dictates the conditions for its rules to be applied (either as defaults or as overrides) over the lower level, and never the other way around.
D/O conditions are identified by the presence of the spec.defaults.when or spec.overrides.when fields in a policy. They should be defined using Common Expression Language (CEL) and are evaluated in the control plane against the lower level specification that the higher level is being applied to; i.e. self in the CEL expression is the lower policy.
A concretely useful application of conditionally enforcing a block of D/O is specifying constraints for lower values. E.g. if a lower policy tries to set a value on a numeric field that is greater (or lower) than a given threshold, apply an override that sets the field to the threshold value; otherwise, use the value declared by the lower policy.
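A sketch of such a constraint, combining an overrides block with the when condition proposed in this section (field names are illustrative; self refers to the lower policy being evaluated):
kind: RateLimitPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n name: prod-web\n overrides:\n when: self.limits["global"].rates[0].limit > 100\n strategy: merge\n limits:\n "global":\n rates:\n\n - limit: 100 # cap any lower policy that tries to exceed 100\n duration: 1\n unit: minute\n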
In contrast, an example of a trivially redundant application of D/O conditions would be specifying a default block of rules that is only applied when the lower level does not declare a more specific replacement. Since this is the natural semantics of a default, one does not have to use conditions for that.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-of-do-cases","title":"Examples of D/O cases","text":"The following sets of examples generalize D/O applications for the presented user stories, regardless of details about specific personas and kinds of targeted resources. They illustrate the expected behavior for different cases involving defaults, overrides, constraints and unsetting.
Examples and the user stories they highlight:
- A. Default policy entirely replaced by another at lower level (gateway-default-policy, route-replace-policy)
- B. Default policy rules merged into policies at lower level (gateway-default-policy-rule, route-add-policy-rule)
- C. Override policy entirely replacing other at lower level (gateway-override-policy)
- D. Override policy rules merged into other at lower level (gateway-override-policy-rule)
- E. Override policy rules setting constraints to other at lower level (policy-constraints)
- F. Policy rule that unsets a default from higher level (route-unset-policy-rule)
In all the examples, a Gateway and an HTTPRoute object are targeted by two policies, and an effective policy is presented highlighting the expected outcome. This poses no harm to generalizations involving the same or different kinds of targeted resources, multiple policies targeting a same object, etc.
The leftmost YAML is always the \"higher\" (less specific) policy; the one in the middle, separated from the leftmost one by a \"+\" sign, is the \"lower\" (more specific) policy; and the rightmost YAML is the expected Effective Policy.
For a complete reference of the order of hierarchy, from least specific to most specific kinds of resources, as well as how to resolve conflicts of hierarchy in case of policies targeting objects at the same level, see Gateway API's Hierarchy definition for Policy Attachment and Conflict Resolution.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-a-default-policy-entirely-replaced-by-another-at-lower-level","title":"Examples A - Default policy entirely replaced by another at lower level","text":"Example A1. A default policy that is replaced entirely if another one is set at a lower level
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-b-default-policy-rules-merged-into-policies-at-lower-level","title":"Examples B - Default policy rules merged into policies at lower level","text":"Example B1. A default policy whose rules are merged into other policies at a lower level, where individual default policy rules can be overridden or unset - without conflict
Example B2. A default policy whose rules are merged into other policies at a lower level, where individual default policy rules can be overridden or unset - with conflict
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-c-override-policy-entirely-replacing-other-at-lower-level","title":"Examples C - Override policy entirely replacing other at lower level","text":"Example C1. An override policy that replaces any other that is set at a lower level entirely
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-d-override-policy-rules-merged-into-other-at-lower-level","title":"Examples D - Override policy rules merged into other at lower level","text":"Example D1. An override policy whose rules are merged into other policies at a lower level, overriding individual policy rules with same identification - without conflict
Example D2. An override policy whose rules are merged into other policies at a lower level, overriding individual policy rules with same identification - with conflict
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-e-override-policy-rules-setting-constraints-to-other-at-lower-level","title":"Examples E - Override policy rules setting constraints to other at lower level","text":"The examples in this section introduce the proposal for a new when
field for the defaults
and overrides
blocks. This field dictates the conditions to be found in a lower policy that would make a higher policy or policy rule apply, according to the corresponding defaults
or overrides
semantics and atomic
or merge
strategy.
Combined with a simple case of override policy (see Examples C), the when
condition field allows modeling for use cases of setting constraints for lower-level policies.
As here proposed, the value of the when
condition field must be a valid Common Expression Language (CEL) expression.
Example E1. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - lower policy is compliant with the constraint
Example E2. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - lower level violates the constraint
Example E3. An override policy whose rules set constraints to field values of other policies at a lower level, overriding individual policy values of rules with same identification if those values violate the constraints - merge granularity problem
The following example illustrates the possibly unintended consequences of enforcing D/O at strict levels of granularity, and the flip side of the strategy
field offering a closed set of options (atomic
, merge
).
On one hand, the API is simple and straightforward, and there are no deeper side effects to be concerned about, other than at the two levels provided (atomic sets or merged individual policy rules.) On the other hand, this design may require more offline interaction between the actors who manage conflicting policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#examples-f-policy-rule-that-unsets-a-default-from-higher-level","title":"Examples F - Policy rule that unsets a default from higher level","text":"The examples in this section introduce a new field unset: []string
at the same level as the bare set of policy rules. The value of this field, provided as a list, dictates the default policy rules declared at a higher level to be removed ("unset") from the effective policy, specified by the names of the policy rules.
Example F1. A policy that unsets a default policy rule set at a higher level
Example F2. A policy that tries to unset an override policy rule set at a higher level
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#status-reporting-and-policy-discoverability","title":"Status reporting and Policy discoverability","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#possible-statuses-of-an-inherited-policy","title":"Possible statuses of an inherited policy","text":"An inherited policy can be at any of the following conditions (RFC 0004):
- Accepted: True, reason "Accepted": "Policy has been accepted"
- Accepted: False, reason "Conflicted": "Policy is conflicted by <policy-ns/policy-name>"
- Accepted: False, reason "Invalid": "Policy is invalid"
- Accepted: False, reason "TargetNotFound": "Policy target <resource-name> was not found"
- Enforced: True, reason "Enforced": "Policy has been successfully enforced[. The following defaults have been added by <policy-ns/policy-name>: x, y]"
- Enforced: True, reason "PartiallyEnforced": "Policy has been successfully enforced. The following rules have been overridden by <policy-ns/policy-name>: a, b[; the following defaults have been added by <policy-ns/policy-name>: x, y]"
- Enforced: False, reason "Overridden": "Policy has been overridden by <policy-ns/policy-name>"
- Enforced: False, reason "Unknown": "Policy has encountered some issues""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-discoverability-and-effective-policy","title":"Policy discoverability and Effective policy","text":"A special condition must be added to every object that is targeted by a Kuadrant inherited policy if the policy's Enforced
status condition is True
.
This special condition to be added to the target object is kuadrant.io/xPolicyAffected
, where \"xPolicy\" is the kind of the inherited policy (e.g. AuthPolicy, RateLimitPolicy.)
The possible statuses of an object regarding its sensitivity to one or more inherited policies are:
- xPolicyAffected: False, reason "Unaffected": "The object is not affected by any xPolicy"
- xPolicyAffected: True, reason "Affected": "The object is affected by xPolicy <policy-ns/policy-name>"
- xPolicyAffected: True, reason "PartiallyAffected": "The following sections of the object are affected by xPolicy <policy-ns/policy-name>: rules.0, rules.2"
The presence of the PolicyAffected
status condition helps identify that an object is sensitive to one or more policies of a kind, and gives some specifics about the scope of that effect (entire object or selected sections.) In many cases, this should be enough for inferring the actual policy rules being enforced for that object.
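For instance, a route affected by an AuthPolicy could carry a condition along these lines (illustrative sketch):
status:\n conditions:\n\n - type: kuadrant.io/AuthPolicyAffected\n status: "True"\n reason: Affected\n message: The object is affected by AuthPolicy my-namespace/my-auth-policy\n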
For other cases where any of the following situations hold, a more detailed view of the final Effective Policy must be provided to the user:
- If the rules of the policy cannot be inferred by the name of the policy and/or the user lacks permission to read the policy object;
- If the object is affected by more than one policy.
To help visualize the effective policy for a given target object in that situation, at least one of the following options must be provided to the user:
- A read-only
EffectivePolicy
custom resource, defined for each kind of inherited policy, and with an instance created for each affected object, that is reconciled and updated by the policy controller. - A HTTP endpoint of the policy controller that users can consume to read the effective policy.
- A CLI tool that offers a command that queries the cluster and returns the effective policy \u2013 either by leveraging any of the methods above or computing the effective policy \"on-the-fly\" in the same fashion as the policy controller does.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#reference-level-explanation","title":"Reference-level explanation","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#applying-policies","title":"Applying policies","text":"The following diagrams are a high level model to guide the process of applying a set of policies of a kind for a given Gateway object, where the Gateway object is considered the root of a hierarchy, and for all objects beneath, being the xRoute objects the leaves of the hierarchical tree.
As presented, policies can target either Gateways or route objects (HTTPRoutes, GRPCRoutes), with no restriction regarding the number of policies of a kind that target a same particular object, i.e. an N:1 relationship is allowed. Without any loss of generality, a 1:1 relationship between policies of a kind and targeted objects can be imposed if preferred, as a measure to initially reduce the amount of information presented to the user and the corresponding cognitive load.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#apply-policies-to-a-gateway-root-object-and-all-objects-beneath","title":"Apply policies to a Gateway (root object) and all objects beneath","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n start([For a Gateway <i>g</i><br>and policy kind <i>pk</i>]) -->\n list-routes[List all routes<br>accepted by <i>g</i> as <i>R</i>] -->\n apply-policies-for-r\n subgraph for-each-route[For each <i>r in R</i>]\n apply-policies-for-r[[Apply policies<br>of kind <i>pk</i><br>scoped for <i>r</i>]] -->\n apply-policies-for-r\n end\n for-each-route -->\n build-virtual-route[Build a virtual route <i>vr</i><br>with all route rules not<br>target by any policy] -->\n apply-policies-for-vr[[Apply policies<br>of kind <i>pk</i><br>scoped for <i>vr</i>]] -->\n finish(((END)))
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#apply-policies-of-a-kind-for-an-object","title":"Apply policies of a kind for an object","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n apply-policies-for-o-start([Apply policies of kind <i>pk</i><br>scoped for an object <i>o</i>]) -->\n list-policies[Make <i>P</i> \u2190 all policies <br>of kind <i>pk</i> that<br>affect <i>o</i>] -->\n sort-policies[Sort <i>P</i> from<br>lowest to highest] -->\n build-effective-policy[Build an effective<br>policy <i>ep</i> without<br>any policy rules] -->\n merge-p-into-ep\n subgraph for-each-policy[For each policy <i>p in P</i>]\n merge-p-into-ep[[Merge <i>p into <i>ep</i>]] -->\n merge-p-into-ep\n end\n for-each-policy -->\n reconcile-ep[Reconcile resources<br>for <i>ep</i>] -->\n apply-policies-for-o-finish(((END)))
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-two-policies-together","title":"Merging two policies together","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-p1-into-p2-start([Merge policy <i>p1</i><br>into policy <i>p2</i>]) -->\n p1-format{Explicit<br><i>defaults</i> or <i>overrides</i><br>declared in <i>p1</i>?}\n p1-format -- Yes --> merge-defaults-for-r[[\"Merge <b>defaults</b> block<br>of policy rules<br>of <i>p1</i> into <i>p2</i>\"]] --> merge-overrides-for-r[[\"Merge <b>overrides</b> block<br>of policy rules<br>of <i>p1</i> into <i>p2</i>\"]] --> merge-p1-into-p2-finish(((Return <i>p2</i>)))\n p1-format -- No --> merge-bare-rules-for-r[[\"Merge ungrouped<br>block of policy rules<br>of <i>p1</i> into <i>p2</i><br>(as <b>defaults</b>)\"]] --> merge-p1-into-p2-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-a-generic-block-of-policy-rules-defaults-or-overrides-into-a-policy-with-conditions","title":"Merging a generic block of policy rules (defaults or overrides) into a policy with conditions","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-block-of-rules-into-p-start([Merge block of<br>policy rules <i>B</i><br>into policy <i>p</i>]) -->\n r-conditions-match{\"<i>B.when(p)</i>\"}\n r-conditions-match -- \"Conditions do not match\" --> merge-block-of-rules-into-p-finish(((Return <i>p</i>)))\n r-conditions-match -- \"Conditions match\" --> block-semantics{Merge <i>B</i> as}\n block-semantics -- \"Defaults\" --> merge-default-block-into-p[[Merge default block<br>of policy rules <i>B</i><br>into policy <i>p</i>]] --> merge-block-of-rules-into-p-finish\n block-semantics -- \"Overrides\" --> merge-override-block-into-p[[Merge override block<br>of policy rules <i>B</i><br>into policy <i>p</i>]] --> merge-block-of-rules-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merge-a-defaults-block-of-policy-rules-into-a-policy","title":"Merge a defaults
block of policy rules into a policy","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-default-block-into-p-start([Merge default block<br>of policy rules <i>B</i><br>into policy <i>p</i>]) -->\n unset-unwanted-policy-rules[Remove from <i>B</i><br>all policy rules<br>listed in <i>p.unset</i>] -->\n p-empty{<i>p.empty?</i>}\n p-empty -- \"Yes\" --> full-replace-p-with-defaut-block[<i>p.rules \u2190 B</i>] --> merge-default-block-into-p-finish(((Return <i>p</i>)))\n p-empty -- \"No\" --> default-block-strategy{<i>B.strategy</i>}\n default-block-strategy -- \"Atomic\" --> merge-default-block-into-p-finish\n default-block-strategy -- \"Merge\" --> default-p-r-exists\n subgraph for-each-default-policy-rule[\"For each <i>r in B<i>\"]\n default-p-r-exists{\"<i>p[r.id].exists?</i>\"}\n default-p-r-exists -- \"Yes\" --> default-p-r-exists\n default-p-r-exists -- \"No\" --> default-replace-pr[\"<i>p[r.id] \u2190 r</i>\"] --> default-p-r-exists\n end\n for-each-default-policy-rule -->\n merge-default-block-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merge-an-overrides-block-of-policy-rules-into-a-policy","title":"Merge an overrides
block of policy rules into a policy","text":"%%{ init: { \"theme\": \"neutral\" } }%%\nflowchart LR\n merge-override-block-into-p-start([Merge override block<br>of policy rules <i>B</i><br>into policy <i>p</i>]) -->\n override-block-strategy{<i>B.strategy</i>}\n override-block-strategy -- \"Atomic\" --> full-replace-p-with-override-block[<i>p.rules \u2190 B</i>] --> merge-override-block-into-p-finish(((Return <i>p</i>)))\n override-block-strategy -- \"Merge\" --> override-replace-pr\n subgraph for-each-override-policy-rule[\"For each <i>r in B<i>\"]\n override-replace-pr[\"<i>p[r.id] \u2190 r</i>\"] --> override-replace-pr\n end\n for-each-override-policy-rule -->\n merge-override-block-into-p-finish
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#implementation-tiers","title":"Implementation tiers","text":"This section proposes a possible path for the implementation of this RFC for Kuadrant's existing kinds of policies that are affected by D/O \u2013 notably AuthPolicy and RateLimitPolicy.
The path is divided into 3 tiers that could be delivered in steps, in addition to a series of enhancements & refactoring.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-1","title":"Tier 1","text":" - Atomic defaults (currently supported; missing addition of the
defaults
field to the APIs) - Atomic overrides
- Policy status and Policy discoverability (i.e. PolicyAffected status on target objects)
- CRD labels
gateway.networking.k8s.io/policy: inherited | direct
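A sketch of the label as it could appear on an inherited policy CRD (illustrative):
apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: authpolicies.kuadrant.io\n labels:\n gateway.networking.k8s.io/policy: inherited\n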
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-2","title":"Tier 2","text":" - D/O
when
conditions (and support for \"constraints\") - Merge strategy
- Reporting of effective policy
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#tier-3","title":"Tier 3","text":" - Unsetting (
unset
) - Metrics for D/O policies (control plane)
- Docs: possible approaches for \"requirements\"
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#enhancements-and-refactoring","title":"Enhancements and refactoring","text":" - Extract generic part of D/O implementation to Kuadrant/gateway-api-machinery.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#drawbacks","title":"Drawbacks","text":"See Mutually exclusive API designs > Design option: strategy
field.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#rationale-and-alternatives","title":"Rationale and alternatives","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#mutually-exclusive-api-designs","title":"Mutually exclusive API designs","text":"The following alternatives were considered for the design of the API spec to support D/O:
- strategy field (RECOMMENDED)
- granularity field
- when conditions (at any level of the spec)
- CEL functions (at any level of the spec)
- "path-keys"
- JSON patch-like
All the examples in the RFC are based on the strategy field API design.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-strategy-field","title":"Design option: strategy
field","text":"Each block of defaults
and overrides
specifies a field strategy: atomic | merge
, with atomic
assumed if the field is omitted.
All the examples in the RFC are based on this design for the API spec.
Some of the implications of the design are explained in the section Atomic vs. individually merged policy rules, highlighting the support for specifying the level of atomicity of the rules in the policy based on only 2 granularities: the entire set of policy rules (atomic) or each named policy rule (merge.)
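For illustration, a gateway-level policy using this design could look like the following (a sketch consistent with the examples elsewhere in this RFC):
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n "a": {…}\n authorization:\n "b": {…}\n strategy: merge # each named policy rule ("a", "b") is merged individually; omit for atomic\n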
✅ Pros:
- Same schema as a normal policy without D/O
- Declarative
- Safe against "unmergeable objects" (e.g. two rules declaring different one-of options)
- Strong types
- Extensible (by adding more fields, e.g. to support unsetting defaults)
- Easy to learn
❌ Cons:
- 2 levels of granularity only: either all ('atomic') or policy rule ('merge')
- 1 granularity declaration per D/O block → declaring both 'atomic' and 'merge' simultaneously requires 2 separate policies targeting the same object
The design option based on the strategy
field is the RECOMMENDED design for the implementation of Kuadrant Policies enabled for D/O. This is due to the pros above, plus the fact that this design can evolve to other, more versatile forms, such as granularity
field, when
conditions or CEL functions, in the future, while the opposite would be harder to achieve.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-granularity-field","title":"Design option: granularity
field","text":"Each block of defaults
and overrides
would specify a granularity
field, set to an integer that indicates how many levels down from the root of the set of policy rules are treated as keys, with everything below that level treated as an atomic value.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n authorization:\n \"b\": {\u2026}\n granularity: 0 # the entire spec (\"rules\") is an atomic value\n overrides:\n rules:\n metadata:\n \"c\": {\u2026}\n response:\n \"d\": {\u2026}\n granularity: 2 # each policy rule (\"c\", \"d\") is an atomic value\n
✅ Pros:
- Same as the strategy field design option
- Unlimited levels of granularity (values can be pointed as atomic at any level)
❌ Cons:
- 1 granularity declaration per D/O block → N levels simultaneously require N policies
- Granularity specified as a number: user needs to count the levels
- Setting a deep level of granularity can cause merging "unmergeable objects"
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-when-conditions-at-any-level-of-the-spec","title":"Design option: when
conditions (at any level of the spec)","text":"Inspired by the extension of the API for D/O with an additional when
field (see Examples E), this design alternative would use the presence of this field to signal the granularity of the atomic operation of default or override.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026}\n when: CEL # level 1 - entire \"authentication\" block\n authorization:\n \"b\":\n \"prop-1\": {\u2026}\n when: CEL # level 2 - \"b\" authorization policy rule\n
✅ Pros:
- Same as the granularity field design option
- As many granularity declarations per D/O block as complex objects in the policy
- Granularity specified "in-place"
❌ Cons:
- Setting a deep level of granularity can cause merging "unmergeable objects"
- Implementation nightmare: hard to define the API from existing types
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-cel-functions-at-any-level-of-the-spec","title":"Design option: CEL functions (at any level of the spec)","text":"This design option leans on the power of Common Expression Language (CEL), extrapolating the design alternative with when
conditions beyond declaring a CEL expression just to determine if a statically declared value should apply. Rather, it proposes the use of CEL functions that output the value to default to or to override with, taking the conflicting "lower" value as input, with or without a condition as part of the CEL expression. The value of a key set to a CEL function indicates the level of granularity of the D/O operation.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n rules:\n authentication:\n \"a\": {\u2026} # static value\n \"b\": \"cel:self.value > 3 ? AuthenticationRule{value: 3} : self\"\n authorization: |\n cel:Authorization{\n c: AuthorizationRule{prop1: \"x\"}\n }\n
✅ Pros:
- Unlimited levels of granularity
- Granularity specified "in-place"
- Extremely powerful
- Elegant and simple implementation-wise
❌ Cons:
- Weakly typed
- Implementation completely new: cannot reuse current API types
- Requires all types to be defined as protobufs
- Without strong guardrails, users can easily shoot themselves in the foot
- Validation likely requires complex functions for parsing the CEL expressions
- Non-declarative
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-path-keys","title":"Design option: \u201cpath-keys\u201d","text":"A more radical alternative considered consisted of defining defaults
and overrides
blocks whose schemas would not match the ones of a normal policy without D/O. Instead, these blocks would consist of simple key-value pairs, where the keys specify the paths in an affected policy where to apply the value atomically.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n \"rules.authentication\":\n \"a\": {G}\n \"rules.authorization.b\": {G}\n
✅ Pros:
- D/O as simple key-value sets (keys: where to apply, values: what to apply)
- Declarative
- Unlimited levels of granularity (values can be pointed as atomic at any level)
- Unlimited merge declarations per D/O block
- Intuitive, easy to learn
❌ Cons:
- Not the same schema as the normal policy (without D/O), not very GWAPI-like
- Weakly typed (i.e. map[string]any)
- Not extensible (e.g., cannot add other fields to the API)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#design-option-json-patch-like","title":"Design option: JSON patch-like","text":"Similar to the path-keys design option, inspired by JSON patch operations, to provide more kinds of operations and extensibility.
Example:
kind: AuthPolicy\nmetadata:\n name: gw-policy\nspec:\n targetRef:\n kind: Gateway\n defaults:\n\n - path: rules.authentication\n operation: add\n value: { \"a\": {G} }\n - path: rules.authorization.b\n operation: remove\n - path: |\n rules.authentication.a.\n value\n operation: le\n value: 50\n
\u2705 Pros \u274c Cons - Same as \"path-keys\" field
- Extensible, all kinds of operations supported (add, remove, constraint)
- Not same schema as the normal policy (without D/O) - not very GWAPI-like
- Less declarative
- Weakly typed (i.e.
value: any)
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#prior-art","title":"Prior art","text":"Other than the primitive support only for implicit atomic defaults provided by Kuadrant for the AuthPolicy and RateLimitPolicy, other real-life implementations of D/O along the lines proposed by Gateway API are currently unknown.
Some illustrative examples are provided in:
- GEP-2649: search for "CDNCachingPolicy", as well as "Merging into existing spec fields";
- gwctl effective policy calculation for inherited policies: see the policy manager's merge test cases.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#out-of-scope","title":"Out of scope","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-requirements","title":"Policy requirements","text":"A use case often described in association with D/O is the one for declaring policy requirements. These are high level policies that declare requirements to be fulfilled by more specific (lower level) policies without specifying concrete default or override values nor constraints. E.g.: \"an authentication policy must be enforced, but none is provided by default.\"
A typical generic policy requirement user story is:
As a Platform Engineer, when configuring a Gateway, I want to set policy requirements to be fulfilled by one who manages an application/route linked to my Gateway, so all interested parties, including myself, can be aware of applications deployed to the cluster that lack a particular policy protection being enforced.
Policy requirements as here described are out of scope of this RFC.
We believe policy requirement use cases can be stated and solved as an observability problem, by defining metrics and alerts that cover for missing policies or policy rules, without necessarily having to write a policy of the same kind to express such requirement to be fulfilled.
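For instance, a hypothetical alerting rule in that spirit could be declared as follows (a sketch only; both metric names are placeholders assumed for illustration, not existing metrics):
apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: policy-requirements\nspec:\n groups:\n\n - name: policy-requirements\n rules:\n - alert: RouteMissingAuthPolicy\n # both metric names below are hypothetical placeholders\n expr: count(httproutes_total) > count(authpolicies_enforced_total)\n for: 5m\n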
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#unresolved-questions","title":"Unresolved questions","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#merging-policies-with-references-to-external-objects","title":"Merging policies with references to external objects","text":"How to handle merges of policies from different namespaces that contain references to other objects (e.g. Secrets)?
Often policy rules include references to other Kubernetes objects, such as Secrets, typically defined in the same namespace as the policy object. When merging policies from different namespaces, these references need to be taken into account.
If not carried along with the derivative resources (e.g. Authorino AuthConfig objects) that are created from a merge of policies (or from the computed effective policy), composed out of definitions from different namespaces, and that depend on those references, these references to external objects can be broken.
This is not much of a problem when only atomic D/O is supported, as the derivative objects that depend on the references could be forced to be created in the same namespace as the policy that wins against all the others, and therefore in the same namespace of the winning referents as well. However, when merging policies, we can run into a situation where final effective policies (and thus also other derivative resources) contain references to objects inherited from definitions from other namespaces.
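To make the problem concrete, consider a policy rule that references a Secret in the policy's own namespace (a sketch in the style of API key authentication; the field layout is illustrative):
kind: AuthPolicy\nmetadata:\n name: route-policy\n namespace: app-ns\nspec:\n targetRef:\n kind: HTTPRoute\n name: my-route\n rules:\n authentication:\n "api-key-users":\n apiKey:\n selector:\n matchLabels:\n app: my-app # resolves against Secrets in app-ns\n
If this rule is merged into an effective policy whose derivative resources live in another namespace, the reference no longer resolves without one of the measures below.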
Possible solutions to this problem include:
- Copying the referenced objects into the namespace where the derivative resources will be created.
- Involves maintaining (watching and reconciling) those referenced objects
- May raise security concerns
- Allowing derivative resources (e.g. Authorino AuthConfigs) to reference objects across namespaces, as well as giving permissions to the components that process those references (e.g. Authorino) to read across namespaces
- May raise security concerns
- Should probably be restricted to derivative resources created by Kuadrant and not allowed to users who create the derivative resources themselves
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#policy-spec-resembling-more-the-target-spec","title":"Policy spec resembling more the target spec","text":"Should Kuadrant's inherited policy specs resemble more the specs of the objects they target?
The UX for one who writes a Kuadrant policy of the inherited class of policies is arguably not very different from writing any custom resource that happens to specify a targetRef
field. Other than the name and kind of the target object, there is not much in a Kuadrant policy custom resource that gives the user an experience close to "adding fields" to the target object.
With the exception of a few types reused for the route selectors, the spec of a Kuadrant policy is very different from the spec of the object that the policy ultimately augments, i.e. the spec of the route object. This remains basically unchanged after this RFC. However, another way to think about the design of those APIs is one where, in contrast, the specs of the policies partially mirror the spec of the route, so users can write policies in a more intuitive fashion, as if the definitions of the policy were extensions of the routes they target (directly, or by targeting gateways the routes are attached to.)
E.g.:
kind: HTTPRoute\nmetadata:\n name: my-route\nspec:\n rules:\n\n - name: rule-1\n matches:\n - method: GET\n backendRef: {\u2026}\n - name: rule-2\n backendRef: {\u2026}\n
An inherited policy that targets the HTTPRoute above could otherwise look like the following:
kind: Policy\nmetadata:\n name: my-policy\nspec:\n targetRef:\n kind: HTTPRoute\n name: my-route\n defaults: # mirrors the spec of the httproute object\n policySpecificDef: {\u2026} # augments the entire httproute object\n overrides: # mirrors the spec of the httproute object\n rules:\n\n - name: rule-2\n policySpecificDef: {\u2026} # augments only httprouterule rule-2 of the httproute object\n
The above already is somewhat closer to being true for the AuthPolicy API, than it is for the RateLimitPolicy one. However, that is strictly coincidental, because the AuthPolicy's spec happens to specify a rules
field, where the equivalent at the same level in RateLimitPolicy is called limits
.
This alternative design could make writing policies more like defining filters in an HTTPRoute, with the difference that policies are external to the target they extend (while filters are internal.) At the same time, it could be a replacement for Kuadrant route selectors, where the context of applicability of a policy rule is given by the very structure of the spec in which the policy rule is declared (resembling that of the target), thus also shaping the context for D/O.
One caveat of this design, though, is that each policy-specific definition (i.e. the rule specification that extends the object at a given point defined by the very structure of the spec) is exclusive to that given point in the structure of the object. I.e., one cannot specify a single policy rule that augments N > 1 specific rules of a target HTTPRoute.
Due to its relevance to the design of the API that enables D/O, this was left as an unresolved question. Note nonetheless that, as a pattern, this alternative API design extends beyond inherited policies, impacting as well the direct policy kinds DNSPolicy and TLSPolicy.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#future-possibilities","title":"Future possibilities","text":""},{"location":"architecture/rfcs/0009-defaults-and-overrides/#n1-policy-target-relationship","title":"N:1 policy-target relationship","text":"Although this proposal was thought to keep options open for multiple policies of a kind targeting a same network resource, this is currently not the state of things for Kuadrant. Instead, Kuadrant enforces 1:1 relationship between policies of a kind and target resources.
Supporting N:1 relationships could enable use cases such as App Developers defining D/O for each other at the same level of a shared xRoute, as well as Platform Engineers setting different policy rules on the same Gateway.
This could provide an alternative to achieving separation of concerns for complex policy kinds such as the AuthPolicy, where different users could be responsible for authentication and authorization, without necessarily depending on defining new kinds of policies.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#route-rule-name-and-targetrefsectionname","title":"Route rule name
and targetRef.sectionName
","text":"If Gateway API's GEP-995 is accepted (i.e. kubernetes-sigs/gateway-api#2593 gets merged) and the name
field for route rules implemented in the APIs (HTTPRoute and GRPCRoute), this could impact how Kuadrant delivers D/O. Although the semantics could remain the same, the way users specify the scope for a given set of policy rules could be simplified significantly.
As of today, Kuadrant's AuthPolicy and RateLimitPolicy APIs allow users to target sections of a HTTPRoute based on route selectors, and thus all the conflict resolution involved in handling D/O must take that logic into account.
With named route rules supported by Gateway API, either route selectors could be redefined in a simpler form where each selector consists of a list of names of rules and/or entire policies could be scoped for a section of a resource, by defining the targetRef
field based on the PolicyTargetReferenceWithSectionName
type.
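A policy scoped to a single named route rule could then look like this (hypothetical sketch, assuming GEP-995 rule names and a sectionName-capable targetRef; "rule-2" is an assumed rule name):
kind: RateLimitPolicy\nmetadata:\n name: rule-2-policy\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: my-route\n sectionName: rule-2\n limits: {…}\n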
Note GEP-2649's recommendation of not defining inherited policies that allow for sectionName in the targetRef. Nonetheless, this is a general rule of the spec that is said to be acceptable to break in the spirit of offering better functionality to users, provided the implementation can deal with the associated discoverability and complexity problems of this feature.
"},{"location":"architecture/rfcs/0009-defaults-and-overrides/#use-listmaptype-instead-of-maps-of-policy-rules","title":"Use listMapType instead of maps of policy rules","text":"Despite having recently modified the AuthPolicy and RateLimitPolicy APIs to use maps for declaring policy rules instead of lists (RFC 0001), reverting this design in future versions of these APIs, plus treating those lists as listMapType
, could let us leverage the API server's strategic merge type to handle merges between policy objects.
In the Policy CRDs, the policy rule types must specify a name
field (required). The list of rules type (i.e. []Rule
) must then specify the following Kubebuilder CRD processing annotations:
// +listType=map\n// +listMapKey=name\n
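The policy rules would then be declared as a list of named objects, on which the API server can perform strategic merges keyed by name, e.g. (sketch; rule names and contents are illustrative):
rules:\n\n - name: rule-1\n # rule spec {…}\n - name: rule-2\n # rule spec {…}\n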
-
At the time of writing, GEP-713 (Kubernetes Gateway API, SIG-NETWORK) is under revision, expected to be split into two separate GEPs, one for Direct Policies (GEP-2648) and one for Inherited Policies (GEP-2649.) Once these new GEPs supersede GEP-713, all references to the previous GEP in this document must be updated to GEP-2649. ↩
"},{"location":"api-quickstart/","title":"API Quickstart","text":""},{"location":"api-quickstart/#introduction","title":"Introduction","text":"This document details how to setup a local reference architecture, and design and deploy an API. This will show the following API management features in a kube native environment using Kuadrant and other open source tools:
- API design
- API security and access control
- API monitoring
- Traffic management and scalability
The sections in this document are grouped by the persona that is typically associated with the steps in that section. The 3 personas are:
- The platform engineer, who provides and maintains a platform for application developers,
- the application developer, who designs, builds and maintains applications and APIs,
- and the API consumer, who makes calls to the API
"},{"location":"api-quickstart/#pre-requisities","title":"Pre-requisities","text":" docker
: https://www.docker.com/products/docker-desktop/ kind
: https://kind.sigs.k8s.io/ kubectl
: https://kubernetes.io/docs/reference/kubectl/ kustomize
: https://kustomize.io/ helm
: https://helm.sh/docs/intro/install/ operator-sdk
: https://sdk.operatorframework.io/docs/installation/ - An AWS account with a Secret Access Key and Access Key ID. You will also need a Route 53 hosted zone.
"},{"location":"api-quickstart/#platform-engineer-platform-setup","title":"(Platform engineer) Platform Setup","text":"Export the following env vars:
export KUADRANT_AWS_ACCESS_KEY_ID=<key_id>\nexport KUADRANT_AWS_SECRET_ACCESS_KEY=<secret>\nexport KUADRANT_AWS_REGION=<region>\nexport KUADRANT_AWS_DNS_PUBLIC_ZONE_ID=<zone>\nexport KUADRANT_ZONE_ROOT_DOMAIN=<domain>\n
Clone the api-quickstart repo and run the quickstart script:
git clone git@github.com:Kuadrant/api-quickstart.git && cd api-quickstart\n./quickstart.sh\n
This will take several minutes as 3 local kind clusters are started and configured in a hub and spoke architecture. The following components will be installed on the clusters:
- Hub
- Open Cluster Management, as a 'hub' cluster
- Kuadrant Multi Cluster Gateway Controller, for managing a Gateway in multiple clusters centrally
- Gatekeeper, for constraints on Gateway Policy requirements
- Thanos, for receiving metrics centrally
- Grafana, for visualising API & Gateway metrics
- Spoke x2
- Open Cluster Management, as a 'spoke' cluster
- Kuadrant Operator, for auth and rate limiting policies attached to a HTTPRoute
- Istio, with the Gateway API CRDs as the Gateway for ingress traffic
- MetalLB, for exposing the Gateway service on the local network
- Prometheus, for scraping and federating metrics to the hub
"},{"location":"api-quickstart/#verify-the-gateway-and-configuration","title":"Verify the Gateway and configuration","text":"View the ManagedZone, Gateway and TLSPolicy. The ManagedZone and TLSPolicy should have a Ready status of true. The Gateway should have a Programmed status of True.
kubectl --context kind-api-control-plane get managedzone,tlspolicy,gateway -n multi-cluster-gateways\n
"},{"location":"api-quickstart/#guard-rails-constraint-warnings-about-missing-policies-dns-tls","title":"Guard Rails: Constraint warnings about missing policies ( DNS, TLS)","text":"Running the quick start script above will bring up Gatekeeper and the following constraints:
- Gateways must have a TLSPolicy targeting them
- Gateways must have a DNSPolicy targeting them
To view the above constraints in kubernetes, run this command:
kubectl --context kind-api-control-plane get constraints\n
Info
Since a gateway has been created automatically, along with a TLSPolicy
, the violation for a missing DNSPolicy
will be active until one is created.
"},{"location":"api-quickstart/#grafana-dashboard-view","title":"Grafana dashboard view","text":"To get a top level view of the constraints in violation, the Stitch: Platform Engineer Dashboard
can be used. This can be accessed at https://grafana.172.31.0.2.nip.io
Grafana has a default username and password of admin
. You can find the Stitch: Platform Engineer Dashboard
dashboard in the Default
folder.
"},{"location":"api-quickstart/#create-the-missing-dnspolicy","title":"Create the missing DNSPolicy","text":"Create a DNSPolicy that targets the Gateway with the following command:
kubectl --context kind-api-control-plane apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSPolicy\nmetadata:\n name: prod-web\n namespace: multi-cluster-gateways\nspec:\n targetRef:\n name: prod-web\n group: gateway.networking.k8s.io\n kind: Gateway\n loadBalancing:\n geo:\n defaultGeo: EU\nEOF\n
"},{"location":"api-quickstart/#platform-overview","title":"Platform Overview","text":"Since we have created all the policies that Gatekeeper had the guardrails around, you should no longer see any constraints in violation. This can be seen back in the Stitch: Platform Engineer Dashboard
in Grafana at https://grafana.172.31.0.2.nip.io
"},{"location":"api-quickstart/#application-developer-app-setup","title":"(Application developer) App setup","text":""},{"location":"api-quickstart/#api-design","title":"API Design","text":"Fork and/or clone the Petstore App at https://github.com/Kuadrant/api-petstore
git clone git@github.com:kuadrant/api-petstore && cd api-petstore\n# Or if you forked the repository:\n# git clone git@github.com:<your_github_username>/api-petstore && cd api-petstore\n
Then deploy it to the first workload cluster:
kustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-1 apply -f-\n
This will deploy:
- A petstore Namespace
- A Secret, containing a static API key that we'll use later for auth
- A Service and Deployment for our petstore app
- A Gateway API HTTPRoute for our petstore app
"},{"location":"api-quickstart/#route-53-dns-zone","title":"Route 53 DNS Zone","text":"When the DNS Policy has been created, and the previously created HTTPRoute
has been attached, a DNS record custom resource will also be created in the cluster resulting in records being created in your AWS Route53. Navigate to Route53 and you should see some new records in the zone.
"},{"location":"api-quickstart/#configuring-the-region-label","title":"Configuring the region label","text":"Configure the app REGION
to be eu
:
kubectl --context kind-api-workload-1 apply -k ./resources/eu-cluster/\n
"},{"location":"api-quickstart/#exploring-the-open-api-specification","title":"Exploring the Open API Specification","text":"The raw Open API spec can be found in the root of the repo:
cat openapi.yaml\n# ---\n# openapi: 3.0.2\n# info:\n# title: Stitch API Petstore\n# version: 1.0.18\n
"},{"location":"api-quickstart/#application-developer-api-security","title":"(Application developer) API security","text":"We've included a number of sample x-kuadrant
extensions in the OAS spec already:
- At the top-level of our spec, we've defined an
x-kuadrant
extension to detail the Gateway API Gateway associated with our app:
x-kuadrant:\n route:\n name: petstore\n namespace: petstore\n labels:\n deployment: petstore\n owner: cferreir\n hostnames:\n\n - petstore.$KUADRANT_ZONE_ROOT_DOMAIN\n parentRefs:\n - name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n kind: Gateway\n
- In
/user/login
, we have a Gateway API backendRef
set and a rate_limit
set. The rate limit policy for this endpoint restricts usage of this endpoint to 2 requests in a 10 second window: x-kuadrant:\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n - limit: 2\n duration: 10\n unit: second\n
- In
/store/inventory
, we also have a Gateway API backendRef
set and a rate_limit
set. The rate limit policy for the endpoint restricts usage of this endpoint to 10 requests in a 10 second window: x-kuadrant:\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n - limit: 10\n duration: 10\n unit: second\n
- Finally, we have a
securityScheme
setup for apiKey auth, powered by Authorino. We'll show this in more detail a little later: securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
These extensions allow us to automatically generate Kuadrant Kubernetes resources, including AuthPolicies, RateLimitPolicies and Gateway API resources such as HTTPRoutes.
"},{"location":"api-quickstart/#kuadrantctl","title":"kuadrantctl","text":"kuadrantctl
is a CLI that supports the generation of various Kubernetes resources via OAS specs. Let's run some commands to generate some of these resources. If you forked the api-petstore repo, you can also check them in. Let's apply these to our running workload to implement rate limiting and auth.
"},{"location":"api-quickstart/#installing-kuadrantctl","title":"Installing kuadrantctl
","text":"Download kuadrantctl
from the v0.2.0
release artifacts:
https://github.com/Kuadrant/kuadrantctl/releases/tag/v0.2.0
Drop the kuadrantctl
binary somewhere into your $PATH (e.g. /usr/local/bin/
).
For this next part of the tutorial, we recommend installing yq
to pretty-print YAML resources.
"},{"location":"api-quickstart/#generating-kuadrant-resources-with-kuadrantctl","title":"Generating Kuadrant resources with kuadrantctl
","text":"We'll generate an AuthPolicy
to implement API key auth, per the securityScheme
in our OAS spec:
# Generate this resource and save:\nkuadrantctl generate kuadrant authpolicy --oas openapi.yaml | yq -P | tee resources/authpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/authpolicy.yaml\n
Next we'll generate a RateLimitPolicy
, to protect our APIs with the limits we have setup in our OAS spec:
# Generate this resource and save:\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | yq -P | tee resources/ratelimitpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/ratelimitpolicy.yaml\n
Lastly, we'll generate a Gateway API HTTPRoute
to service our APIs:
# Generate this resource and save:\nkuadrantctl generate gatewayapi httproute --oas openapi.yaml | yq -P | tee resources/httproute.yaml\n\n# Apply this resource to our cluster, setting the hostname in via the KUADRANT_ZONE_ROOT_DOMAIN env var:\nkustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-1 apply -f-\n
"},{"location":"api-quickstart/#check-our-applied-policies","title":"Check our applied policies","text":"Navigate to your app's Swagger UI:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/docs/\n
"},{"location":"api-quickstart/#ratelimitpolicy","title":"RateLimitPolicy","text":"Let's check that our RateLimitPolicy
for the /store/inventory
has been applied and works correctly. Recall, our OAS spec had the following limits applied:
x-kuadrant:\n ...\n rate_limit:\n rates:\n\n - limit: 10\n duration: 10\n unit: second\n
Navigate to the /store/inventory
API, click Try it out
, and Execute
. You'll see a response similar to:
{\n \"available\": 10,\n \"pending\": 5,\n \"sold\": 3\n}\n
This API has a rate limit applied, so if you send more than 10 requests in a 10 second window, you will see a 429
HTTP Status code from responses, and a \"Too Many Requests\" message in the response body. Click Execute
quickly in succession to see your RateLimitPolicy
in action.
"},{"location":"api-quickstart/#authpolicy","title":"AuthPolicy","text":"Let's check that our AuthPolicy
for the /store/admin
endpoint has been applied and works correctly. Recall, our OAS spec had the following securitySchemes applied:
securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
Navigate to the /store/admin
API, click Try it out
, and Execute
. You'll get a 401 response.
You can set a value for the api_key
header by clicking Authorize
at the top of the page. Set a value of secret
. This api key value is stored in the petstore-api-key
Secret in the petstore
namespace. Try the /store/admin
endpoint again and you should get a 200 response with the following:
{\"message\":\"You are an admin!\"}\n
"},{"location":"api-quickstart/#policy-adjustments","title":"Policy Adjustments","text":"Run the Swagger UI editor to explore the OAS spec and make some tweaks:
docker run -p 8080:8080 -v $(pwd):/tmp -e SWAGGER_FILE=/tmp/openapi.yaml swaggerapi/swagger-editor\n
You should be able to access the Swagger Editor at http://localhost:8080. Our /store/inventory
API needs some additional rate limiting. This is one of our slowest, most expensive services, so we'd like to rate limit it further.
In your openapi.yaml
, navigate to the /store/inventory
endpoint in the paths
block. Modify the rate_limit block to further restrict the number of requests this endpoint can serve to 2 requests per 10 seconds:
x-kuadrant:\n ...\n rate_limit:\n rates:\n\n - limit: 2\n duration: 10\n unit: second\n
Save your updated spec - File
> Save as YAML
> and update your existing openapi.yaml
. You may need to copy the file from your Downloads folder to the location of the petstore repository.
Next we'll re-generate our RateLimitPolicy
with kuadrantctl
:
# Generate this resource and save:\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | yq -P | tee resources/ratelimitpolicy.yaml\n\n# Apply this resource to our cluster:\nkubectl --context kind-api-workload-1 apply -f ./resources/ratelimitpolicy.yaml\n
At this stage you can optionally check in all the changes to the repo if you forked it.
# Optionally add, commit & push the changes to your fork\ngit add resources\ngit commit -am \"Generated AuthPolicy,RateLimitPolicy & HTTPRoute\"\ngit push # You may need to set an upstream as well\n
In your app's Swagger UI:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/docs/\n
Navigate to the /store/inventory
API once more, click Try it out
, and Execute
.
You'll see the effects of our new RateLimitPolicy
applied. If you now send more than 2 requests in a 10 second window, you'll be rate-limited.
Note: It may take a few minutes for the updated RateLimitPolicy to be configured with the modified rate limit.
"},{"location":"api-quickstart/#application-developer-scaling-the-application","title":"(Application developer) Scaling the application","text":"Deploy the petstore to the 2nd cluster:
kustomize build ./resources/ | envsubst | kubectl --context kind-api-workload-2 apply -f-\nkubectl --context kind-api-workload-2 apply -f ./resources/authpolicy.yaml\nkubectl --context kind-api-workload-2 apply -f ./resources/ratelimitpolicy.yaml\n
Configure the app REGION
to be us
:
kubectl --context kind-api-workload-2 apply -k ./resources/us-cluster/\n
"},{"location":"api-quickstart/#platform-engineer-scaling-the-gateway-and-traffic-management","title":"(Platform engineer) Scaling the gateway and traffic management","text":"Deploy the Gateway to the 2nd cluster:
kubectl --context kind-api-control-plane patch placement http-gateway --namespace multi-cluster-gateways --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/numberOfClusters\", \"value\":2}]'\n
Label the 1st cluster as being in the 'EU' region, and the 2nd cluster as being in the 'US' region. These labels are used by the DNSPolicy for configuring geo DNS.
kubectl --context kind-api-control-plane label managedcluster kind-api-workload-1 kuadrant.io/lb-attribute-geo-code=EU --overwrite\nkubectl --context kind-api-control-plane label managedcluster kind-api-workload-2 kuadrant.io/lb-attribute-geo-code=US --overwrite\n
"},{"location":"api-quickstart/#api-consumer-accessing-the-api-from-multiple-regions","title":"(API consumer) Accessing the API from multiple regions","text":"Info
This section is optional. If you'd rather skip this part, you can skip forward to the \"(App developer) API traffic monitoring\" section.
"},{"location":"api-quickstart/#pre-requisites","title":"Pre-requisites","text":" python3
and pip3
: these are required for this part of the walkthrough
To demonstrate traffic management by geographical region, we'll use a tool called 'geosight'. This tool resolves hostnames from different regions, fetches a website from the resulting DNS record address and takes a screenshot. The petstore app has been configured to serve a flag image based on which region it is running in. In the 1st cluster, the EU flag is used. In the 2nd cluster, the US flag is used.
To install 'geosight', run the following commands:
git clone git@github.com:jasonmadigan/geosight.git && cd geosight\npip3 install -r requirements.txt\nplaywright install\n
Then run it using:
python3 app.py\n
Access the webapp at http://127.0.0.1:5001/. In the input box, type the address from below and click the Fetch
button:
echo https://petstore.$KUADRANT_ZONE_ROOT_DOMAIN/server/details\n
After a moment you should see DNS results for different regions, and a corresponding screenshot.
If you want to experiment with other regions, check out the Configuration section for geosight and the Kuadrant docs for geo loadbalancing.
"},{"location":"api-quickstart/#app-developer-api-traffic-monitoring","title":"(App developer) API traffic monitoring","text":"To view the App developer dashboard, the same Grafana will be used from the platform engineer steps above: https://grafana.172.31.0.2.nip.io
The most relevant dashboard for an app developer is Stitch: App Developer Dashboard
You should see panels about APIs, including:
- Request and error rates
- API summaries
- API request summaries
- API duration
All corresponding to our HTTPRoute, generated from our OAS spec.
"},{"location":"api-quickstart/#platform-engineer-apis-summary-view","title":"(Platform Engineer) APIs summary view","text":"Now that the app developer has deployed their app, new metrics and data is now available in the platform engineer dashboard seen in the previous step https://grafana.172.31.0.2.nip.io
:
- Gateways, routes and policies
- Constraints & Violations (there should be no violations present)
- APIs Summary
"},{"location":"api-quickstart/#summary","title":"Summary","text":"You now have a local environment with a reference architecture to design and deploy an API in a kube native way, using Kuadrant and other open source tools.
"},{"location":"api-quickstart/#cleanup","title":"Cleanup","text":"To destroy the previously created kind
clusters, run:
./cleanup.sh\n
Info
DNS records in AWS will remain after cleanup - you can remove these from your zone manually.
"},{"location":"kuadrantctl/","title":"kuadrantctl","text":"kuadrantctl
is a CLI tool for managing Kuadrant configurations and resources.
"},{"location":"kuadrantctl/#installing","title":"Installing","text":"kuadrantctl
can be installed either by downloading pre-compiled binaries or by compiling from source. For most users, downloading the binary is the easiest and recommended method.
"},{"location":"kuadrantctl/#installing-pre-compiled-binaries","title":"Installing Pre-compiled Binaries","text":" - Download the latest binary for your platform from the
kuadrantctl
Releases page. - Unpack the binary.
- Move it to a directory in your
$PATH
so that it can be executed from anywhere.
"},{"location":"kuadrantctl/#compiling-from-source","title":"Compiling from Source","text":"If you prefer to compile from source or are contributing to the project, you can install kuadrantctl
using make install
. This method requires Golang 1.21 or newer.
From the root of the repository, run:
make install\n
This will compile kuadrantctl and install it in the bin directory at the root of the repository. It also ensures the binary reports the correct version. It can be run using ./bin/kuadrantctl.
"},{"location":"kuadrantctl/#usage","title":"Usage","text":"Below is a high-level overview of its commands, along with links to detailed documentation for more complex commands.
"},{"location":"kuadrantctl/#general-syntax","title":"General Syntax","text":"kuadrantctl [command] [subcommand] [flags]\n
"},{"location":"kuadrantctl/#commands-overview","title":"Commands Overview","text":"Command Description completion
Generate autocompletion scripts for the specified shell generate
Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications help
Help about any command version
Print the version number of kuadrantctl
"},{"location":"kuadrantctl/#flags","title":"Flags","text":"Flag Description -h
, --help
Help for kuadrantctl
-v
, --verbose
Enable verbose output"},{"location":"kuadrantctl/#commands-detail","title":"Commands Detail","text":""},{"location":"kuadrantctl/#completion","title":"completion
","text":"Generate an autocompletion script for the specified shell.
Subcommand Description bash
Generate script for Bash fish
Generate script for Fish powershell
Generate script for PowerShell zsh
Generate script for Zsh"},{"location":"kuadrantctl/#generate","title":"generate
","text":"Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications.
Subcommand Description gatewayapi
Generate Gateway API resources kuadrant
Generate Kuadrant resources"},{"location":"kuadrantctl/#generate-gatewayapi","title":"generate gatewayapi
","text":"Generate Gateway API resources from an OpenAPI 3.x specification
Subcommand Description Flags httproute
Generate Gateway API HTTPRoute from OpenAPI 3.0.X --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#generate-kuadrant","title":"generate kuadrant
","text":"Generate Kuadrant resources from an OpenAPI 3.x specification
Subcommand Description Flags authpolicy
Generate a Kuadrant AuthPolicy from an OpenAPI 3.0.x specification --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\") ratelimitpolicy
Generate Kuadrant RateLimitPolicy from an OpenAPI 3.0.x specification --oas string
Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o
Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#version","title":"version
","text":"Print the version number of kuadrantctl
.
No additional flags or subcommands.
"},{"location":"kuadrantctl/#additional-guides","title":"Additional Guides","text":""},{"location":"kuadrantctl/#generating-gateway-api-httproute-objects","title":"Generating Gateway API HTTPRoute Objects","text":" - Generates Gateway API HTTPRoute objects from an OpenAPI Specification (OAS) 3.x.
- Supports reading from a file, URL, or stdin.
- Example usages and more information can be found in the detailed guide.
"},{"location":"kuadrantctl/#generating-kuadrant-authpolicy-objects","title":"Generating Kuadrant AuthPolicy Objects","text":" - Generates Kuadrant AuthPolicy objects for managing API authentication.
- Supports
openIdConnect
and apiKey
types from the OpenAPI Security Scheme Object. - Example usages and more information can be found in the detailed guide.
"},{"location":"kuadrantctl/#generating-kuadrant-ratelimitpolicy-objects","title":"Generating Kuadrant RateLimitPolicy Objects","text":" - Generates Kuadrant RateLimitPolicy objects for managing API rate limiting.
- Supports reading from a file, URL, or stdin.
- Example usages and more information can be found in the detailed guide.
For more detailed information about each command, including options and usage examples, use kuadrantctl [command] --help
.
"},{"location":"kuadrantctl/#using-with-github-actions","title":"Using with GitHub Actions","text":"- name: Install kuadrantctl\n uses: jaxxstorm/action-install-gh-release@v1.10.0\n with: # Grab the latest version\n repo: Kuadrant/kuadrantctl\n
"},{"location":"kuadrantctl/#commands","title":"Commands","text":" - Generate Gateway API HTTPRoute objects from OpenAPI 3.X
- Generate Kuadrant RateLimitPolicy from OpenAPI 3.X
- Generate Kuadrant AuthPolicy from OpenAPI 3.X
"},{"location":"kuadrantctl/#contributing","title":"Contributing","text":"The Development guide describes how to build the kuadrantctl CLI and how to test your changes before submitting a patch or opening a PR.
"},{"location":"kuadrantctl/#licensing","title":"Licensing","text":"This software is licensed under the Apache 2.0 license.
See the LICENSE and NOTICE files that should have been provided along with this software for details.
"},{"location":"kuadrantctl/doc/development/","title":"Development Guide","text":""},{"location":"kuadrantctl/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":" - git
- go version 1.21+
"},{"location":"kuadrantctl/doc/development/#build-the-cli","title":"Build the CLI","text":"$ git clone https://github.com/kuadrant/kuadrantctl.git\n$ cd kuadrantctl && make install\n$ bin/kuadrantctl version\n{\"level\":\"info\",\"ts\":\"2023-11-08T23:44:57+01:00\",\"msg\":\"kuadrantctl version: latest\"}\n
"},{"location":"kuadrantctl/doc/development/#quick-steps-to-contribute","title":"Quick steps to contribute","text":" - Fork the project.
- Download your fork to your PC (
git clone https://github.com/your_username/kuadrantctl && cd kuadrantctl
) - Create your feature branch (
git checkout -b my-new-feature
) - Make changes and run tests (
make test
) - Add them to staging (
git add .
) - Commit your changes (
git commit -m 'Add some feature'
) - Push to the branch (
git push origin my-new-feature
) - Create new pull request
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/","title":"Generating Gateway API HTTPRoutes","text":""},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#generate-gateway-api-httproute-object-from-openapi-3","title":"Generate Gateway API HTTPRoute object from OpenAPI 3","text":"The kuadrantctl generate gatewayapi httproute
command generates an Gateway API HTTPRoute from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the cli by one of the following channels:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from stdin standard input stream.
"},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#usage","title":"Usage","text":"$ kuadrantctl generate gatewayapi httproute -h\nGenerate Gateway API HTTPRoute from OpenAPI 3.0.X\n\nUsage:\n kuadrantctl generate gatewayapi httproute [flags]\n\nFlags:\n -h, --help help for httproute\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Under the example folder there are examples of OAS 3 that can be used to generate the resources
As an AuthPolicy and RateLimitPolicy both require a HTTPRoute to target, the user guides for generating those policies include examples of running the kuadrantctl generate gatewayapi httproute
command.
You can find those guides here:
- Generate Kuadrant AuthPolicy
- Generate Kuadrant RateLimitPolicy
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/","title":"Generating Kuadrant AuthPolicies","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#generate-kuadrant-authpolicy-object-from-openapi-3","title":"Generate Kuadrant AuthPolicy object from OpenAPI 3","text":"The kuadrantctl generate kuadrant authpolicy
command generates an Kuadrant AuthPolicy from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the cli by one of the following channels:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from stdin standard input stream.
OpenAPI Security Scheme Object types
Types Implemented openIdConnect
YES apiKey
YES http
NO oauth2
NO"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openidconnect-type-description","title":"openIdConnect
Type Description","text":"The following OAS example has one protected endpoint GET /dog
with openIdConnect
security scheme type.
paths:\n /dog:\n get:\n operationId: \"getDog\"\n security:\n\n - securedDog: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n securedDog:\n type: openIdConnect\n openIdConnectUrl: https://example.com/.well-known/openid-configuration\n
Running the command
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml | yq -P\n
The generated authpolicy (only relevan fields shown here):
kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\nspec:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v1/dog\n method: GET\n rules:\n authentication:\n getDog_securedDog:\n credentials: {}\n jwt:\n issuerUrl: https://example.com/.well-known/openid-configuration\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /api/v1/dog\n method: GET\n
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#apikey-type-description","title":"apiKey
Type Description","text":"The following OAS example has one protected endpoint GET /dog
with apiKey
security scheme type.
paths:\n /dog:\n get:\n operationId: \"getDog\"\n security:\n\n - securedDog: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n securedDog:\n type: apiKey\n name: dog_token\n in: query\n
Running the command
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml | yq -P\n
The generated authpolicy (only relevan fields shown here):
kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\nspec:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /dog\n method: GET\n rules:\n authentication:\n getDog_securedDog:\n credentials:\n queryString:\n name: dog_token\n apiKey:\n selector:\n matchLabels:\n kuadrant.io/apikeys-by: securedDog\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /dog\n method: GET\n
In this particular example, the endpoint GET /dog
will be protected. The token needs to be in the query string of the request included in a parameter named dog_token
. Kuadrant will validate received tokens against tokens found in kubernetes secrets with label kuadrant.io/apikeys-by: ${sec scheme name}
. In this particular example the label selector will be: kuadrant.io/apikeys-by: securedDog
.
Like the following example:
apiVersion: v1\nkind: Secret\nmetadata:\n name: api-key-1\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: securedDog\nstringData:\n api_key: MYSECRETTOKENVALUE\ntype: Opaque\n
Note: Kuadrant validates tokens against api keys found in secrets. The label selector format kuadrant.io/apikeys-by: ${sec scheme name}
is arbitrary and designed for this CLI command.
For more information about Kuadrant auth based on api key: https://docs.kuadrant.io/authorino/docs/user-guides/api-key-authentication/
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#usage","title":"Usage","text":"Generate Kuadrant AuthPolicy from OpenAPI 3.0.X\n\nUsage:\n kuadrantctl generate kuadrant authpolicy [flags]\n\nFlags:\n -h, --help help for authpolicy\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Under the example folder there are examples of OAS 3 that can be used to generate the resources
"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#user-guide","title":"User Guide","text":"The verification steps will lead you to the process of deploying and testing the following api with endpoints protected using different security schemes:
Operation Security Scheme GET /api/v1/cat
public (not auth) POST /api/v1/cat
ApiKey in header GET /api/v1/dog
OpenIdConnect GET /api/v1/snake
OpenIdConnect OR ApiKey in query string - [Optional] Setup SSO service supporting OIDC. For this example, we will be using keycloak.
- Create a new realm
petstore
- Create a client
petstore
. In the Client Protocol field, select openid-connect
. - Configure client settings. Access Type to public. Direct Access Grants Enabled to ON (for this example password will be used directly to generate the token).
- Add a user to the realm
- Click the Users menu on the left side of the window. Click Add user.
- Type the username
bob
, set the Email Verified switch to ON, and click Save. - On the Credentials tab, set the password
p
. Enter the password in both the fields, set the Temporary switch to OFF to avoid the password reset at the next login, and click Set Password
.
Now, let's run local cluster to test the kuadrantctl new command to generate authpolicy.
- Clone the repo
git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n
- Setup a cluster, Istio and Gateway API CRDs and Kuadrant
Use our single-cluster quick start script - this will install Kuadrant in a local kind
cluster: https://docs.kuadrant.io/getting-started-single-cluster/
- Build and install CLI in
bin/kuadrantctl
path
make install\n
- Deploy petstore backend API
kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n
- Let's create Petstore's OpenAPI spec
cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.1.0\"\ninfo:\n title: \"Pet Store API\"\n version: \"1.0.0\"\nx-kuadrant:\n route:\n name: \"petstore\"\n namespace: \"petstore\"\n hostnames:\n\n - example.com\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\nservers:\n - url: https://example.io/api/v1\npaths:\n /cat:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # No sec requirements\n operationId: \"getCat\"\n responses:\n 405:\n description: \"invalid input\"\n post: # API key\n operationId: \"postCat\"\n security:\n - cat_api_key: []\n responses:\n 405:\n description: \"invalid input\"\n /dog:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # OIDC\n operationId: \"getDog\"\n security:\n - oidc:\n - read:dogs\n responses:\n 405:\n description: \"invalid input\"\n /snake:\n x-kuadrant:\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n get: # OIDC or API key\n operationId: \"getSnake\"\n security:\n - oidc: [\"read:snakes\"]\n - snakes_api_key: []\n responses:\n 405:\n description: \"invalid input\"\ncomponents:\n securitySchemes:\n cat_api_key:\n type: apiKey\n name: api_key\n in: header\n oidc:\n type: openIdConnect\n openIdConnectUrl: https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore\n snakes_api_key:\n type: apiKey\n name: snake_token\n in: query\nEOF\n
Replace ${KEYCLOAK_PUBLIC_DOMAIN}
with your SSO instance domain
-
Create an API key only valid for POST /api/v1/cat
endpoint
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: cat-api-key-1\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: cat_api_key\nstringData:\n api_key: I_LIKE_CATS\ntype: Opaque\nEOF\n
Note: the label's value of kuadrant.io/apikeys-by: cat_api_key
is the name of the sec scheme of the OpenAPI spec.
-
Create an API key only valid for GET /api/v1/snake
endpoint
kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n name: snake-api-key-1\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: snakes_api_key\nstringData:\n api_key: I_LIKE_SNAKES\ntype: Opaque\nEOF\n
Note: the label's value of kuadrant.io/apikeys-by: snakes_api_key
is the name of the sec scheme of the OpenAPI spec.
- Create the HTTPRoute using the CLI
bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
- Create Kuadrant's Auth Policy
bin/kuadrantctl generate kuadrant authpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
Now, we are ready to test OpenAPI endpoints
GET /api/v1/cat
-> It's a public endpoint, hence should return 200 Ok
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/cat\"\n
POST /api/v1/cat
-> It's a protected endpoint with apikey
Without any credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"credential not found\"}\ndate: Tue, 28 Nov 2023 22:28:44 GMT\nserver: istio-envoy\ncontent-length: 0\n
The reason headers tell that credential not found
. Credentials satisfying postCat_cat_api_key
authentication is needed.
According to the OpenAPI spec, it should be a header named api_key
. What if we try a wrong token? one token assigned to other endpoint, i.e. I_LIKE_SNAKES
instead of the valid one I_LIKE_CATS
. It should return 401 Unauthorized
.
curl -H \"Host: example.com\" -H \"api_key: I_LIKE_SNAKES\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"the API Key provided is invalid\"}\ndate: Tue, 28 Nov 2023 22:32:55 GMT\nserver: istio-envoy\ncontent-length: 0\n
The reason headers tell that the API Key provided is invalid
. Using valid token (from the secret cat-api-key-1
assigned to POST /api/v1/cats
) in the api_key
header should return 200 Ok
curl -H \"Host: example.com\" -H \"api_key: I_LIKE_CATS\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
GET /api/v1/dog
-> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore
realm)
without credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/dog\"\n
To get the authentication token, this example is using Direct Access Grants oauth2 grant type (also known as Client Credentials grant type). When configuring the Keycloak (OIDC provider) client settings, we enabled Direct Access Grants to enable this procedure. We will be authenticating as bob
user with p
password. We previously created bob
user in Keycloak in the petstore
realm.
export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d 'grant_type=password' \\\n -d 'client_id=petstore' \\\n -d 'scope=openid' \\\n -d 'username=bob' \\\n -d 'password=p' \"https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore/protocol/openid-connect/token\" | jq -r '.access_token')\n
Replace ${KEYCLOAK_PUBLIC_DOMAIN}
with your SSO instance domain
With the access token in place, let's try to get those puppies
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/dog -i\n
it should return 200 OK
GET /api/v1/snake
-> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore
realm) OR with apiKey
This example is to show that multiple security requirements (with OR semantics) can be specified for an OpenAPI operation.
Without credentials, it should return 401 Unauthorized
curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/snake\"\n
With the access token in place, it should return 200 OK (unless the token has expired).
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/snake -i\n
With apiKey it should also work. According to the OpenAPI spec security scheme, it should be a query string named snake_token
and the token needs to be valid token (from the secret snake-api-key-1
assigned to GET /api/v1/snake
)
curl -H 'Host: example.com' -i \"http://127.0.0.1:9080/api/v1/snake?snake_token=I_LIKE_SNAKES\"\n
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/","title":"Generating Kuadrant RateLimitPolicies","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#generate-kuadrant-ratelimitpolicy-object-from-openapi-3","title":"Generate Kuadrant RateLimitPolicy object from OpenAPI 3","text":"The kuadrantctl generate kuadrant ratelimitpolicy
command generates a Kuadrant RateLimitPolicy from your OpenAPI Specification (OAS) 3.x document powered with Kuadrant extensions.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#openapi-specification","title":"OpenAPI specification","text":"An OpenAPI document resource can be provided to the Kuadrant CLI in one of the following ways:
- Filename in the available path.
- URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
- Read from
stdin
standard input stream.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#usage","title":"Usage","text":"Generate Kuadrant RateLimitPolicy from OpenAPI 3.0.x\n\nUsage:\n kuadrantctl generate kuadrant ratelimitpolicy [flags]\n\nFlags:\n -h, --help help for ratelimitpolicy\n --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n -o Output format: 'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n -v, --verbose verbose output\n
Note: The kuadrantctl/examples
directory in GitHub includes sample OAS 3 files that you can use to generate the resources.
"},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#procedure","title":"Procedure","text":" -
Clone the Git repository as follows:
git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n ```\n2. Set up a cluster, Istio and Gateway API CRDs, and Kuadrant as follows: \n\n\n* Use the single-cluster quick start script to install Kuadrant in a local `kind` cluster: https://docs.kuadrant.io/getting-started-single-cluster/.\n\n\n3. Build and install the CLI in `bin/kuadrantctl` path as follows:\n```bash\nmake install\n
-
Deploy the Petstore backend API as follows:
kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n
-
Create the Petstore OpenAPI definition as follows:
cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.0.3\"\ninfo:\n title: \"Pet Store API\"\n version: \"1.0.0\"\nx-kuadrant: ## Root-level Kuadrant extension\n route:\n name: \"petstore\"\n namespace: \"petstore\"\n hostnames:\n\n - example.com\n parentRefs:\n - name: istio-ingressgateway\n namespace: istio-system\nservers:\n - url: https://example.io/v1\npaths:\n /cat:\n x-kuadrant: ## Path-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit:\n rates:\n - limit: 1\n duration: 10\n unit: second\n counters:\n - request.headers.x-forwarded-for\n get: # Added to the route and rate limited\n operationId: \"getCat\"\n responses:\n 405:\n description: \"invalid input\"\n post: # NOT added to the route\n x-kuadrant: \n disable: true\n operationId: \"postCat\"\n responses:\n 405:\n description: \"invalid input\"\n /dog:\n get: # Added to the route and rate limited\n x-kuadrant: ## Operation-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit:\n rates:\n - limit: 3\n duration: 10\n unit: second\n counters:\n - request.headers.x-forwarded-for\n operationId: \"getDog\"\n responses:\n 405:\n description: \"invalid input\"\n post: # Added to the route and NOT rate limited\n x-kuadrant: ## Operation-level Kuadrant extension\n backendRefs:\n - name: petstore\n port: 80\n namespace: petstore\n operationId: \"postDog\"\n responses:\n 405:\n description: \"invalid input\"\nEOF\n
Note: The servers
base path is not included. This is work-in-progress in follow-up PRs.
Operation Applied configuration GET /cat
Should return 200 OK and be rate limited (1 req / 10 seconds). POST /cat
Not added to the HTTPRoute. Should return 404 Not Found. GET /dog
Should return 200 OK and be rate limited (3 req / 10 seconds). POST /dog
Should return 200 OK and NOT rate limited. -
Create the HTTPRoute by using the CLI as follows:
bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
-
Create the rate limit policy as follows:
bin/kuadrantctl generate kuadrant ratelimitpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
-
Test the OpenAPI endpoints as follows:
-
GET /cat
- Should return 200 OK and be rate limited (1 req / 10 seconds).
curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/cat\"\n
POST /cat
- Not added to the HTTPRoute. Should return 404 Not Found. curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/cat\"\n
GET /dog
- Should return 200 OK and be rate limited (3 req / 10 seconds).
curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/dog\"\n
POST /dog
- Should return 200 OK and NOT rate limited.
curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/dog\"\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/","title":"kuadrantctl - CI/CD with Tekton and Argo CD","text":"This guide demonstrates setting up a CI/CD pipeline by using Tekton to deploy Kubernetes Gateway API and Kuadrant resources generated by kuadrantctl
, from an OpenAPI definition. In this example, these resources are applied directly to the cluster where Tekton is running.
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#prerequisites","title":"Prerequisites","text":" - Kuadrant, and all of its prerequisites, installed on a Kubernetes or OpenShift cluster.
- Tekton Pipelines installed on your cluster.
kubectl
configured with access to communicate with your cluster. - Optional: Tekton CLI
tkn
for easier interaction with Tekton resources.
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-1-set-up-your-namespace","title":"Step 1 - Set up your namespace","text":"Create a dedicated namespace as follows:
kubectl create namespace petstore\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-2-create-a-persistent-volume-claim","title":"Step 2 - Create a Persistent Volume Claim","text":"For this example, to store associated Tekton build artifacts, create a Persistent Volume Claim (PVC) in the petstore
namespace as follows:
kubectl apply -n petstore -f - <<EOF\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: tekton-kuadrantctl-pvc\n namespace: petstore\nspec:\n accessModes:\n\n - ReadWriteOnce\n resources:\n requests:\n storage: 1Gi\nEOF\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-3-define-the-tekton-task","title":"Step 3 - Define the Tekton Task","text":"Define the task that outlines steps to clone a repository, generate Kuadrant and Kubernetes resources by using kuadrantctl
, and apply them directly to the cluster as follows:
kubectl apply -f - <<'EOF'\napiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\n name: run-kuadrantctl\n namespace: petstore\nspec:\n params:\n\n - name: gitRepoUrl\n description: URL of the git repository to clone\n - name: gitRevision\n description: Git revision to checkout (branch, tag, sha)\n workspaces:\n - name: source\n description: Workspace to checkout the git repo\n - name: kubeconfig\n description: Workspace containing kubeconfig for Kubernetes cluster access\n steps:\n - name: clean-workspace\n image: alpine:latest\n script: |\n sh -c 'rm -rf $(workspaces.source.path)/* $(workspaces.source.path)/.[!.]* $(workspaces.source.path)/..?*'\n - name: clone\n image: alpine/git:latest\n script: |\n git clone $(params.gitRepoUrl) $(workspaces.source.path)\n cd $(workspaces.source.path)\n git checkout $(params.gitRevision)\n - name: download-kuadrantctl\n image: curlimages/curl:latest\n script: |\n ARCH=$(uname -m)\n case $ARCH in\n x86_64) BIN_ARCH=\"amd64\";;\n arm64) BIN_ARCH=\"arm64\";;\n aarch64) BIN_ARCH=\"arm64\";;\n *) echo \"Unsupported architecture: $ARCH\" && exit 1 ;;\n esac\n cd $(workspaces.source.path)\n curl -LO \"https://github.com/Kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\"\n tar -xzf kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\n - name: run-kuadrantctl\n image: alpine:latest\n script: |\n cd $(workspaces.source.path)\n mkdir -p generated-resources\n ./kuadrantctl generate kuadrant authpolicy --oas openapi.yaml | tee generated-resources/authpolicy.yaml\n ./kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | tee generated-resources/ratelimitpolicy.yaml\n ./kuadrantctl generate gatewayapi httproute --oas openapi.yaml | tee generated-resources/httproute.yaml\n - name: apply-resources\n image: bitnami/kubectl\n script: |\n cd $(workspaces.source.path)\n export KUADRANT_ZONE_ROOT_DOMAIN=example.com # domain name used in the HTTPRoute for the petstore sample app\n for file in ./generated-resources/*.yaml; do\n envsubst < \"$file\" | kubectl apply -n petstore -f - \n done\nEOF\n
Note: This example uses Tekton with kubectl
to apply resources to a cluster. It is best to use a tool such as Argo CD to implement continuous delivery by using a GitOps approach. In this scenario, you would do the following:
- Use
kuadrantctl
to generate Kubernetes and Kuadrant resources as part a Tekton pipeline. - Commit these new resources to a Git repository.
- Use ArgoCD to sync these changes from the Git repository to a Kubernetes or OpenShift cluster.
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-4-create-a-kubeconfig-secret","title":"Step 4 - Create a Kubeconfig secret","text":"Important: While this guide uses a kubeconfig
secret for simplicity, do not use this in production environments. Instead, use a service account for enhanced security.
This example uses a kubeconfig
secret and role bindings to demonstrate how to provide access for pushing generated resources to a cluster. However, for production setups, employing a service account is best.
To proceed, create a kubeconfig
secret in the petstore
namespace to provide Tekton with access to your Kubernetes cluster as follows:
kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=/path/to/.kube/config -n petstore\n
Create an associated ClusterRole
and ClusterRoleBinding
as follows:
kubectl apply -n petstore -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: kuadrant-ci-example-full-access\nrules:\n\n- apiGroups: [\"*\"]\n resources: [\"*\"]\n verbs: [\"*\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: kuadrant-ci-example-full-access-binding\nsubjects:\n- kind: ServiceAccount\n name: default\n namespace: petstore\nroleRef:\n kind: ClusterRole\n name: kuadrant-ci-example-full-access\n apiGroup: rbac.authorization.k8s.io\nEOF\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-5-trigger-the-taskrun","title":"Step 5 - Trigger the TaskRun","text":"Execute the task from the petstore
namespace, referencing the kubeconfig
secret for cluster access as follows:
This example runs this task with the Kuadrant Petstore app: https://github.com/kuadrant/api-petstore.
kubectl apply -n petstore -f - <<EOF\napiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\n name: run-kuadrantctl-taskrun\n namespace: petstore\nspec:\n taskRef:\n name: run-kuadrantctl\n params:\n\n - name: gitRepoUrl\n value: \"https://github.com/kuadrant/api-petstore.git\"\n - name: gitRevision\n value: \"main\"\n workspaces:\n - name: source\n persistentVolumeClaim:\n claimName: tekton-kuadrantctl-pvc\n - name: kubeconfig\n secret:\n secretName: kubeconfig-secret\nEOF\n
If you have tkn
installed, you can easily view the progress of the pipe run as follows:
tkn taskrun list -n petstore\nNAME STARTED DURATION STATUS\nrun-kuadrantctl-taskrun 12 seconds ago --- Running(Pending)\n
tkn taskrun logs -n petstore -f\n\n\n[clone] Cloning into '/workspace/source'...\n[clone] Already on 'main'\n[clone] Your branch is up to date with 'origin/main'.\n\n[download-kuadrantctl] % Total % Received % Xferd Average Speed Time Time Time Current\n[download-kuadrantctl] Dload Upload Total Spent Left Speed\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n100 21.4M 100 21.4M 0 0 6601k 0 0:00:03 0:00:03 --:--:-- 8756k\n\n[run-kuadrantctl] {\"kind\":\"AuthPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}],\"rules\":{\"authentication\":{\"storeAdmin_api_key\":{\"credentials\":{\"customHeader\":{\"name\":\"api_key\"}},\"apiKey\":{\"selector\":{\"matchLabels\":{\"kuadrant.io/apikeys-by\":\"api_key\"}}},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}]}}}},\"status\":{}}\n[run-kuadrantctl] {\"kind\":\"RateLimitPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"limits\":{\"getInventory\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":10,\"duration\":10,\"unit\":\"second\"}]},\"loginUser\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":2,\"duration\":10,\"unit\":\"second\"}]}}},\"status\":{}}\n[run-kuadrantctl] {\"kind\":\"HTTPRoute\",\"apiVersion\":\"gateway.networking.k8s.io/v1beta1\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"parentRefs\":[{\"kind\":\"Gateway\",\"namespace\":\"kuadrant-multi-cluster-gateways\",\"name\":\"prod-web\"}],\"hostnames\":[\"petstore.${KUADRANT_ZONE_ROOT_DOMAIN}\"],\"rules\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]}]},\"status\":{\"parents\":null}}\n\n[apply-resources] authpolicy.kuadrant.io/petstore created\n[apply-resources] httproute.gateway.networking.k8s.io/petstore created\n[apply-resources] ratelimitpolicy.kuadrant.io/petstore created\n
"},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-6-cleanup","title":"Step 6 - Cleanup","text":"Clean up your resources as follows:
- Remove the
petstore
namespace: kubectl delete ns petstore
- Remove the
ClusterRole
and ClusterRoleBinding
: kubectl delete clusterrole kuadrant-ci-example-full-access
kubectl delete clusterrolebinding kuadrant-ci-example-full-access-binding
"},{"location":"kuadrantctl/doc/openapi-apicurio/","title":"Using Apicurio Studio with Kuadrant OAS extensions","text":"You can use OpenAPI extensions to define extra functionality beyond what is covered by the standard OpenAPI specification. Extensions typically start with the x-
prefix, for example, x-codegen
. Kuadrant OpenAPI extensions start with the x-kuadrant
prefix, and allow you to configure Kuadrant policy information alongside your API.
Apicurio Studio is a UI tool for visualizing and editing OpenAPI designs and definitions, which can visualize security details and custom extensions specified in your OpenAPI definition.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#prerequisites","title":"Prerequisites","text":" - You have Apicurio Studio installed and running. For more information, see the Apicurio Studio documentation.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-apicurio/#step-1-access-your-openapi-definition-in-apicurio-studio","title":"Step 1 - Access your OpenAPI definition in Apicurio Studio","text":"Open or import your OpenAPI definition in Apicurio Studio. On the Design tab, select the VENDOR-EXTENSiONS section to add an extension. Alternatively, you can use the Source tab to edit the API definition directly.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#step-2-add-kuadrant-extensions-to-your-api-definition","title":"Step 2 - Add Kuadrant extensions to your API definition","text":"The following configuration and extension points are supported by Apicurio Studio and the kuadrantctl
CLI:
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-http-route","title":"Generate an HTTP route","text":"To generate an HTTPRoute for the API, add the following x-kuadrant
block to your API definition in Apicurio Studio, replacing values to match your API details and the location of your Gateway:
x-kuadrant:\n route:\n name: petstore\n namespace: petstore\n hostnames:\n\n - 'petstore.example.com'\n parentRefs:\n - name: prod-web\n namespace: kuadrant-multi-cluster-gateways\n kind: Gateway\n
For more details, see Generate Gateway API HTTPRoute object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"To generate an AuthPolicy, add a securityScheme
to the components
block in your API definition. The following securityScheme
requires that an API key header is set:
securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
Although securityScheme
is not an OpenAPI extension, it is used by kuadrantctl
like the other extensions mentioned in this document.
When added, Apicurio Studio will display the following update in the SECURITY SCHEMES section:
For more details, see Generate Kuadrant AuthPolicy object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"To generate a RateLimitPolicy for the API, add the following x-kuadrant
block to a path in your API definition, replacing values to match your API details.
paths:\n /:\n x-kuadrant:\n backendRefs:\n -\n name: petstore\n namespace: petstore\n port: 8080\n rate_limit:\n rates:\n -\n limit: 10\n duration: 10\n unit: second\n
When added, Apicurio Studio will display the following update in the VENDOR-EXTENSiONS section for that specific path:
For more details, see Generate Kuadrant RateLimitPolicy object from OpenAPI 3.
"},{"location":"kuadrantctl/doc/openapi-apicurio/#additional-resources","title":"Additional resources","text":" - OpenAPI 3.0.x Kuadrant Extensions in the kuadrantctl documentation.
- Apicurio Studio - Now with OpenAPI Vendor Extensions.
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/","title":"OpenAPI 3.0.x Kuadrant extensions","text":"This reference information shows examples of how to add Kuadrant extensions at the root, path, or operation level in an OpenAPI 3.0.x definition.
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#root-level-kuadrant-extension","title":"Root-level Kuadrant extension","text":"You can add a Kuadrant extension at the root level of an OpenAPI definition. The following example shows an extension added for a petstore
app:
x-kuadrant:\n route: ## HTTPRoute metadata\n name: \"petstore\"\n namespace: \"petstore\"\n labels: ## map[string]string\n deployment: petstore\n hostnames: ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n - example.com\n parentRefs: ## []gateway.networking.k8s.io/v1beta1.ParentReference\n - name: apiGateway\n namespace: gateways\n
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#path-level-kuadrant-extension","title":"Path-level Kuadrant extension","text":"You can add a Kuadrant extension at the path level of an OpenAPI definition. This configuration at the path level is the default when there is no operation-level configuration. The following example shows an extension added for a /cat
path:
paths:\n /cat:\n x-kuadrant: ## Path-level Kuadrant extension\n disable: true ## Remove from the HTTPRoute. Optional. Default: false\n pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact\n backendRefs: ## Backend references to be included in the HTTPRoute. []gateway.networking.k8s.io/v1beta1.HTTPBackendRef. Optional.\n\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit: ## Rate limit configuration. Optional.\n rates: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n - limit: 1\n duration: 10\n unit: second\n counters: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector\n - auth.identity.username\n when: ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n
"},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#operation-level-kuadrant-extension","title":"Operation-level Kuadrant extension","text":"You can add a Kuadrant extension at the operation level of an OpenAPI definition. This extension uses the same schema as the path-level Kuadrant extension. The following example shows an extension added for a get
operation:
paths:\n /cat:\n get:\n x-kuadrant: ## Operation-level Kuadrant extension\n disable: true ## Remove from the HTTPRoute. Optional. Default: path level \"disable\" value.\n pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact.\n backendRefs: ## Backend references to be included in the HTTPRoute. Optional.\n\n - name: petstore\n port: 80\n namespace: petstore\n rate_limit: ## Rate limit configuration. Optional.\n rates: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n - limit: 1\n duration: 10\n unit: second\n counters: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector\n - auth.identity.username\n when: ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n operator: eq\n value: alice\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/","title":"Integrating Kuadrant OAS extensions with Red Hat OpenShift Dev Spaces","text":"OpenAPI extensions enhance the standard OpenAPI specification by adding custom functionality. Kuadrant OpenAPI extensions are identified by the x-kuadrant
prefix. You can use OpenAPI extensions to integrate Kuadrant policies directly into your API definitions.
Red Hat OpenShift Dev Spaces provides a browser-based, cloud-native IDE that supports rapid and decentralized development in container-based environments. This tutorial demonstrates how to use OpenShift Dev Spaces to modify an OpenAPI definition by incorporating Kuadrant policies, and then use the kuadrantctl
CLI to create Kubernetes resources for both Gateway API and Kuadrant.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#prerequisites","title":"Prerequisites","text":" -
You must have access to one of the following Dev Spaces instances:
-
A self-hosted OpenShift Dev Spaces instance.
- An OpenShift Dev Spaces instance provided by the Red Hat Developer Sandbox.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-1-setting-up-your-workspace","title":"Step 1 - Setting up your workspace","text":"Create a workspace in Dev Spaces for your project as follows:
- Fork the following repository: https://github.com/Kuadrant/blank-petstore.
- In Dev Spaces, select Create Workspace, and enter the URL of your forked repository. For example:
https://github.com/<your-username>/blank-petstore.git
. - Click Create & Open.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-2-configuring-vs-code-in-dev-spaces","title":"Step 2 - Configuring VS Code in Dev Spaces","text":"For this tutorial, you will perform the following tasks:
- Install
kuadrantctl
in your workspace to demonstrate Kubernetes resource generation from your modified OpenAPI definition. - Optional: Configure Git with your username and email to enable pushing changes back to your repository.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#install-the-kuadrantctl-cli","title":"Install the kuadrantctl CLI","text":"To install kuadrantctl
in your Dev Spaces workspace, enter the following command:
curl -sL \"https://github.com/kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-amd64.tar.gz\" | tar xz -C /home/user/.local/bin\n
This command installs kuadrantctl
in /home/user/.local/bin
, which is included in the container's $PATH
by default.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#optional-configuring-git","title":"Optional: Configuring Git","text":"If you plan to push changes back to your repository, configure your Git username and email as follows:
git config --global user.email \"foo@example.com\"\ngit config --global user.name \"Foo Example\"\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-3-adding-kuadrant-policies-to-your-openapi-definition","title":"Step 3 - Adding Kuadrant policies to your OpenAPI definition","text":"After creating your workspace, Dev Spaces will launch VS Code loaded with your forked repository. Navigate to the openapi.yaml
file in the sample app to begin modifications.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#kuadrant-policies-overview","title":"Kuadrant policies overview","text":"You will enhance your API definition by applying Kuadrant policies to the following endpoints:
/pet/findByStatus
/user/login
/store/inventory
In this tutorial, you will add Kuadrant policies to your API definition as follows:
- Generate an
HTTPRoute
to expose these three routes for an existing Gateway
. - Add API key authentication for the
/user/login
route, using a Kuadrant AuthPolicy
and OAS securitySchemes
. - Add a Kuadrant
RateLimitPolicy
to the /store/inventory
endpoint, to limit the amount of requests this endpoint can receive.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#defining-a-gateway","title":"Defining a Gateway","text":"Use the x-kuadrant
extension in the root level to specify a Gateway
. This information will be used to generate HTTPRoute
s at the path level. For example:
x-kuadrant:\n route: ## HTTPRoute metadata\n name: \"petstore\"\n namespace: \"petstore\"\n labels: ## map[string]string\n deployment: petstore\n hostnames: ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n - example.com\n parentRefs: ## []gateway.networking.k8s.io/v1beta1.ParentReference\n - name: apiGateway\n namespace: gateways\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#specifying-httproutes-for-each-path","title":"Specifying HTTPRoutes for each path","text":"For each path, add an x-kuadrant
extension with backendRefs
to link your routes to your paths as follows:
/pet/findByStatus:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
/user/login:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
/store/inventory:\n x-kuadrant:\n backendRefs:\n\n - name: petstore\n namespace: petstore\n port: 8080\n get:\n # ...\n
Note: The x-kuadrant
extension at the path level applies to all HTTP methods defined in the path. For method-specific policies, move the extension inside the relevant HTTP method block, for example, get
or post
.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#implementing-authpolicy-and-security-schemes","title":"Implementing AuthPolicy and security schemes","text":"To secure the /user/login
endpoint with API key authentication, use the following configuration:
/user/login:\n # ...\n get:\n security:\n\n - api_key: []\n
components:\n schemas:\n # ...\n securitySchemes:\n api_key:\n type: apiKey\n name: api_key\n in: header\n
This configuration generates an AuthPolicy
that references an API key stored in a labeled Secret
:
apiVersion: v1\nkind: Secret\nmetadata:\n name: petstore-api-key\n namespace: petstore\n labels:\n authorino.kuadrant.io/managed-by: authorino\n kuadrant.io/apikeys-by: api_key\nstringData:\n api_key: secret\ntype: Opaque\n
For simplicity, this example uses a simple, static API key for your app."},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#applying-a-ratelimitpolicy-to-an-endpoint","title":"Applying a RateLimitPolicy to an endpoint","text":"To enforce rate limiting on the /store/inventory
endpoint, add the following x-kuadrant
extension:
/store/inventory:\n get:\n # ...\n x-kuadrant:\n backendRefs:\n # ...\n rate_limit:\n rates:\n\n - limit: 10\n duration: 10\n unit: second\n
This limits to 10 requests every 10 seconds for the /store/inventory
endpoint.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-4-generate-kubernetes-resources-by-using-kuadrantctl","title":"Step 4 - Generate Kubernetes resources by using kuadrantctl","text":"With your extensions in place, you can use kuadrantctl
to generate the follollowing Kubernetes resources:
- An
HTTPRoute
for your petstore
app for each of your endpoints. - An
AuthPolicy
with a simple, static API key from a secret for the /user/login
endpoint. - A
RateLimitPolicy
with a rate limit of 10 requests every 10 seconds for the /store/inventory
endpoint.
In Dev Spaces, select \u2630 > Terminal > New Terminal, and run the following commands:
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-httproute","title":"Generate an HTTPRoute","text":"kuadrantctl generate gatewayapi httproute --oas openapi.yaml\n
This command outputs the following HTTPRoute
:
kind: HTTPRoute\napiVersion: gateway.networking.k8s.io/v1beta1\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n parentRefs:\n\n - namespace: gateways\n name: apiGateway\n hostnames:\n - example.com\n rules:\n - matches:\n - path:\n type: Exact\n value: /api/v3/pet/findByStatus\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n - matches:\n - path:\n type: Exact\n value: /api/v3/store/inventory\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\n backendRefs:\n - name: petstore\n namespace: petstore\n port: 8080\nstatus:\n parents: null\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"kuadrantctl generate kuadrant authpolicy --oas openapi.yaml\n
This command outputs the following AuthPolicy
:
apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: petstore\n namespace: petstore\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\n rules:\n authentication:\n GETuserlogin_api_key:\n credentials:\n customHeader:\n name: api_key\n apiKey:\n selector:\n matchLabels:\n kuadrant.io/apikeys-by: api_key\n routeSelectors:\n - matches:\n - path:\n type: Exact\n value: /api/v3/user/login\n method: GET\nstatus: {}\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml\n
This command outputs the following RateLimitPolicy
:
apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n name: petstore\n namespace: petstore\n creationTimestamp: null\n labels:\n deployment: petstore\nspec:\n targetRef:\n group: gateway.networking.k8s.io\n kind: HTTPRoute\n name: petstore\n namespace: petstore\n limits:\n GETstoreinventory:\n routeSelectors:\n\n - matches:\n - path:\n type: Exact\n value: /api/v3/store/inventory\n method: GET\n rates:\n - limit: 10\n duration: 10\n unit: second\nstatus: {}\n
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-5-applying-resources-to-the-app","title":"Step 5 - Applying resources to the app","text":"Note: By default, the oc
and kubectl
commands in Dev Spaces target the cluster running Dev Spaces. If you want to apply resources to another cluster, you must log in with oc
or kubectl
to another cluster, and pass a different --context
to these commands to apply resources to another cluster.
You can now apply these policies to a running app by using kubectl
or oc
. If Dev Spaces is running on a cluster where Kuadrant is also installed, you can apply these resources as follows:
kuadrantctl generate gatewayapi httproute --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant authpolicy --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | kubectl apply -f -\n
Alternatively, you can use kuadrantctl
as part of a CI/CD pipeline. For more details, see the kuadrantctl CI/CD guide.
If you completed the optional Git configuration step, you can enter git commit
to commit the these changes and push them to your fork.
"},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#additional-resources","title":"Additional resources","text":"For more details, see the following documentation on using x-kuadrant
OAS extensions with kuadrantctl
:
- OpenAPI 3.0.x Kuadrant extensions
- Generate Gateway API HTTPRoutes with
kuadrantctl
- Generate Kuadrant AuthPolicy with
kuadrantctl
- Generate Kuadrant RateLimitPolicy with
kuadrantctl
- kuadrantctl CI/CD guide
"},{"location":"dns-operator/","title":"DNS Operator","text":"The DNS Operator is a kubernetes based controller responsible for reconciling DNS Record and Managed Zone custom resources. It interfaces with cloud DNS providers such as AWS and Google to bring the DNS zone into the state declared in these CRDs. One of the key use cases the DNS operator solves, is allowing complex DNS routing strategies such as Geo and Weighted to be expressed allowing you to leverage DNS as the first layer of traffic management. In order to make these strategies valuable, it also works across multiple clusters allowing you to use a shared domain name balance traffic based on your requirements.
"},{"location":"dns-operator/#getting-started","title":"Getting Started","text":""},{"location":"dns-operator/#pre-setup","title":"Pre Setup","text":""},{"location":"dns-operator/#add-dns-provider-configuration","title":"Add DNS provider configuration","text":"NOTE: You can optionally skip this step but at least one ManagedZone will need to be configured and have valid credentials linked to use the DNS Operator.
"},{"location":"dns-operator/#aws-provider-route53","title":"AWS Provider (Route53)","text":"make local-setup-aws-mz-clean local-setup-aws-mz-generate AWS_ZONE_ROOT_DOMAIN=<MY AWS Zone Root Domain> AWS_DNS_PUBLIC_ZONE_ID=<My AWS DNS Public Zone ID> AWS_ACCESS_KEY_ID=<My AWS ACCESS KEY> AWS_SECRET_ACCESS_KEY=<My AWS Secret Access Key>\n
More details about the AWS provider can be found here"},{"location":"dns-operator/#gcp-provider","title":"GCP Provider","text":"make local-setup-gcp-mz-clean local-setup-gcp-mz-generate GCP_ZONE_NAME=<My GCP ZONE Name> GCP_ZONE_DNS_NAME=<My Zone DNS Name> GCP_GOOGLE_CREDENTIALS='<My GCP Credentials.json>' GCP_PROJECT_ID=<My GCP PROJECT ID>\n
More details about the GCP provider can be found here"},{"location":"dns-operator/#running-controller-locally-default","title":"Running controller locally (default)","text":" -
Create the local environment (creates a Kind cluster):
make local-setup\n
-
Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
make run\n
"},{"location":"dns-operator/#running-controller-on-the-cluster","title":"Running controller on the cluster","text":" -
Create the local environment (creates a Kind cluster):
make local-setup DEPLOY=true\n
-
Verify controller deployment
kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n
"},{"location":"dns-operator/#running-controller-on-existing-cluster","title":"Running controller on existing cluster","text":"You\u2019ll need a Kubernetes cluster to run against. You can use KIND to get a local cluster for testing, or run against a remote cluster. Note: Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster kubectl cluster-info
shows).
-
Apply Operator manifests
kustomize build config/default | kubectl apply -f -\n
-
Verify controller deployment
kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n
"},{"location":"dns-operator/#development","title":"Development","text":""},{"location":"dns-operator/#e2e-test-suite","title":"E2E Test Suite","text":"The e2e test suite can be executed against any cluster running the DNS Operator with configuration added for any supported provider.
make test-e2e TEST_DNS_MANAGED_ZONE_NAME=<My managed zone name> TEST_DNS_ZONE_DOMAIN_NAME=<My domain name> TEST_DNS_NAMESPACE=<My test namespace> TEST_DNS_PROVIDER=<aws|gcp>\n
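For example, a hypothetical run against the local-setup AWS managed zone (assuming it lives in the dns-operator-system namespace and uses an illustrative domain):
make test-e2e TEST_DNS_MANAGED_ZONE_NAME=dev-mz-aws TEST_DNS_ZONE_DOMAIN_NAME=mydomain.example.com TEST_DNS_NAMESPACE=dns-operator-system TEST_DNS_PROVIDER=aws\n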
The environment variables are:
- TEST_DNS_MANAGED_ZONE_NAME: Name of the managed zone relevant for the test domain (TEST_DNS_ZONE_DOMAIN_NAME). If using local-setup managed zones, one of [dev-mz-aws; dev-mz-gcp]
- TEST_DNS_ZONE_DOMAIN_NAME: Domain name being used for the test; must match the domain of the managed zone (TEST_DNS_MANAGED_ZONE_NAME)
- TEST_DNS_NAMESPACE: The namespace to run the test in; must be the same namespace as the TEST_DNS_MANAGED_ZONE_NAME
- TEST_DNS_PROVIDER: DNS provider currently being tested, one of [aws; gcp]
"},{"location":"dns-operator/#modifying-the-api-definitions","title":"Modifying the API definitions","text":"If you are editing the API definitions, generate the manifests such as CRs or CRDs using:
make manifests\n
NOTE: Run make --help
for more information on all potential make
targets
More information can be found via the Kubebuilder Documentation
"},{"location":"dns-operator/#license","title":"License","text":"Copyright 2024.
Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0\n
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"},{"location":"dns-operator/docs/RELEASE/","title":"RELEASE","text":""},{"location":"dns-operator/docs/RELEASE/#release","title":"Release","text":""},{"location":"dns-operator/docs/RELEASE/#new-majorminor-version","title":"New Major.Minor version","text":" - Create a new minor release branch from the HEAD of main:
git checkout -b release-0.2\n
- Run prepare release:
make prepare-release IMG_TAG=release-0.2 VERSION=0.2.0-dev CHANNELS=alpha REPLACES_VERSION=0.1.0\n
- Verify local changes, commit and push:
git add .\ngit commit -m \"prepare-release: release-0.2\"\ngit push upstream release-0.2\n
-
Verify that the build image workflow is triggered and completes for the new branch
-
Do any final testing and bug fixing against the release branch; see Verify OLM Deployment
-
Run prepare release for the final version:
make prepare-release VERSION=0.2.0 CHANNELS=stable REPLACES_VERSION=0.1.0\n
- Verify local changes, commit, push and tag:
git add .\ngit commit -m \"prepare-release: v0.2.0\"\ngit tag v0.2.0\ngit push upstream release-0.2\ngit push upstream v0.2.0\n
-
Verify that the build image workflow is triggered and completes for the new tag
-
Verify the new version can be installed from the catalog image, see Verify OLM Deployment
-
Release to the community operator index catalogs.
"},{"location":"dns-operator/docs/RELEASE/#new-patch-version","title":"New Patch version","text":" - Checkout minor release branch:
git checkout release-0.2\n
- Run prepare release:
make prepare-release VERSION=0.2.1 CHANNELS=stable REPLACES_VERSION=0.2.0\n
- Verify local changes, commit and push:
git add .\ngit commit -m \"prepare-release: v0.2.1\"\ngit tag v0.2.1\ngit push upstream release-0.2\ngit push upstream v0.2.1\n
-
Verify that the build image workflow is triggered and completes for the new tag
-
Verify the new version can be installed from the catalog image, see Verify OLM Deployment
-
Release to the community operator index catalogs.
"},{"location":"dns-operator/docs/RELEASE/#verify-olm-deployment","title":"Verify OLM Deployment","text":" -
Deploy the OLM catalog image:
make local-setup install-olm deploy-catalog\n
-
Wait for deployment:
kubectl -n dns-operator-system wait --timeout=60s --for=condition=Available deployments --all\ndeployment.apps/dns-operator-controller-manager condition met\n
-
Check the logs:
kubectl -n dns-operator-system logs -f deployment/dns-operator-controller-manager\n
-
Check the version:
$ kubectl -n dns-operator-system get deployment dns-operator-controller-manager --show-labels\nNAME READY UP-TO-DATE AVAILABLE AGE LABELS\ndns-operator-controller-manager 1/1 1 1 5m42s app.kubernetes.io/component=manager,app.kubernetes.io/created-by=dns-operator,\napp.kubernetes.io/instance=controller-manager,app.kubernetes.io/managed-by=kustomize,app.kubernetes.io/name=deployment,app.kubernetes.io/part-of=dns-operator,\ncontrol-plane=dns-operator-controller-manager,olm.deployment-spec-hash=1jPe8AuMpSKHh51nnDs4j25ZgoUrKhF45EP0Wa,olm.managed=true,olm.owner.kind=ClusterServiceVersion,\nolm.owner.namespace=dns-operator-system,olm.owner=dns-operator.v0.2.0-dev,operators.coreos.com/dns-operator.dns-operator-system=\n
"},{"location":"dns-operator/docs/RELEASE/#community-operator-index-catalogs","title":"Community Operator Index Catalogs","text":" - Operatorhub Community Operators
- Openshift Community Operators
"},{"location":"dns-operator/docs/managedzone/","title":"Creating and using a ManagedZone resource.","text":""},{"location":"dns-operator/docs/managedzone/#what-is-a-managedzone","title":"What is a ManagedZone","text":"A ManagedZone is a reference to a DNS zone. By creating a ManagedZone we are instructing the MGC about a domain or subdomain that can be used as a host by any gateways in the same namespace. These gateways can use a subdomain of the ManagedZone.
If a gateway attempts to use a domain as a host and there is no matching ManagedZone for that host, then that host on that gateway will fail to function.
A gateway's host will be matched to any ManagedZone that the host is a subdomain of, i.e. test.api.hcpapps.net
will be matched by any ManagedZone (in the same namespace) of: test.api.hcpapps.net
, api.hcpapps.net
or hcpapps.net
.
When MGC wants to create the DNS Records for a host, it will create them in the most specific matching ManagedZone. e.g. given the zones hcpapps.net
and api.hcpapps.net
the DNS Records for the host test.api.hcpapps.net
will be created in the api.hcpapps.net
zone.
"},{"location":"dns-operator/docs/managedzone/#private-and-public-zones","title":"Private and Public Zones","text":"Some DNS providers offer private zones. While this is something we will want to support in the future, we currently only support public zones.
"},{"location":"dns-operator/docs/managedzone/#delegation","title":"Delegation","text":"Delegation allows you to give control of a subdomain of a root domain to MGC while the root domain has it's DNS zone elsewhere.
In the scenario where a root domain has a zone outside Route53, e.g. external.com
, and a ManagedZone for delegated.external.com
is required, the following steps can be taken:
- Create the ManagedZone for
delegated.external.com
and wait until the status is updated with an array of nameservers (e.g. ns1.hcpapps.net
, ns2.hcpapps.net
). - Copy these nameservers to your root zone for external.com by creating an NS record for each nameserver against the delegated.external.com record.
For example:
delegated.external.com. 3600 IN NS ns1.hcpapps.net.\ndelegated.external.com. 3600 IN NS ns2.hcpapps.net.\n
Now, when MGC creates a DNS record in its Route53 zone for delegated.external.com
, it will be resolved correctly.
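You can sanity-check the delegation with dig, using the example names above:
dig NS delegated.external.com +short\n# expected output:\n# ns1.hcpapps.net.\n# ns2.hcpapps.net.\n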
"},{"location":"dns-operator/docs/managedzone/#creating-a-managedzone","title":"Creating a ManagedZone","text":"To create a ManagedZone
, you will first need to create a DNS provider Secret. To create one, see our DNS Provider setup guide, and make note of your provider's secret name.
"},{"location":"dns-operator/docs/managedzone/#example-managedzone","title":"Example ManagedZone","text":"To create a new ManagedZone
with AWS Route 53, with a DNS Provider secret named my-aws-credentials
:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: my-test-aws-zone\n namespace: multi-cluster-gateways\nspec:\n domainName: mydomain.example.com\n description: \"My Managed Zone\"\n dnsProviderSecretRef:\n name: my-aws-credentials\nEOF\n
This will create a new Zone in AWS, for mydomain.example.com
, using the DNS Provider credentials in the my-aws-credentials
Secret.
If you'd like to create a ManagedZone
for an existing zone in AWS, note its Zone ID and run:
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: ManagedZone\nmetadata:\n name: my-test-aws-zone\n namespace: multi-cluster-gateways\nspec:\n id: MYZONEID\n domainName: mydomain.example.com\n description: \"My Managed Zone\"\n dnsProviderSecretRef:\n name: my-aws-credentials\nEOF\n
"},{"location":"dns-operator/docs/managedzone/#dnsprovidersecretref","title":"dnsProviderSecretRef","text":"This is a reference to secret containing the credentials and other configuration for accessing your dns provider dnsProvider
Note: the Secret referenced in the dnsProviderSecretRef
field must be in the same namespace as the ManagedZone.
Note: as an id
was specified, the Managed Gateway Controller will not re-create this zone, nor will it delete it if this ManagedZone
is deleted.
"},{"location":"dns-operator/docs/managedzone/#spec-of-a-managedzone","title":"Spec of a ManagedZone","text":"The ManagedZone is a simple resource with an uncomplicated API, see a sample here.
"},{"location":"dns-operator/docs/provider/","title":"Configuring a DNS Provider","text":"In order to be able to interact with supported DNS providers, Kuadrant needs a credential that it can use.
"},{"location":"dns-operator/docs/provider/#supported-providers","title":"Supported Providers","text":"Kuadrant Supports the following DNS providers currently
- AWS Route 53 (AWS)
- Google Cloud DNS (GCP)
"},{"location":"dns-operator/docs/provider/#aws-route-53-provider","title":"AWS Route 53 Provider","text":"Kuadrant expects a Secret
with a credential. Below is an example for AWS Route 53. It is important to set the secret type to aws
:
kubectl create secret generic my-aws-credentials \\\n --namespace=kuadrant-dns-system \\\n --type=kuadrant.io/aws \\\n --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n --from-literal=AWS_REGION=eu-west-1 \\\n --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n
- AWS_REGION (example: eu-west-1): AWS Region
- AWS_ACCESS_KEY_ID (example: XXXX): AWS Access Key ID (see note on permissions below)
- AWS_SECRET_ACCESS_KEY (example: XXXX): AWS Secret Access Key
"},{"location":"dns-operator/docs/provider/#aws-iam-permissions-required","title":"AWS IAM Permissions Required","text":"We have tested using the available policy AmazonRoute53FullAccess; however, it should also be possible to restrict the credential to a particular zone. More information can be found in the AWS docs:
https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/access-control-managing-permissions.html
"},{"location":"dns-operator/docs/provider/#google-cloud-dns-provider","title":"Google Cloud DNS Provider","text":"Kuadant expects a secret with a credential. Below is an example for Google DNS. It is important to set the secret type to gcp
:
kubectl create secret generic my-test-gcp-credentials \\\n --namespace=kuadrant-dns-system \\\n --type=kuadrant.io/gcp \\\n --from-literal=PROJECT_ID=xxx \\\n --from-file=GOOGLE=$HOME/.config/gcloud/application_default_credentials.json\n
- GOOGLE (example: {\"client_id\": \"***\",\"client_secret\": \"***\",\"refresh_token\": \"***\",\"type\": \"authorized_user\"}): The JSON created from either the credential created by the gcloud CLI, or the JSON from the Service Account
- PROJECT_ID (example: my_project_id): ID of the Google project
"},{"location":"dns-operator/docs/provider/#google-cloud-dns-access-permissions-required","title":"Google Cloud DNS Access permissions required","text":"See: https://cloud.google.com/dns/docs/access-control#dns.admin
"},{"location":"dns-operator/docs/provider/#where-to-create-the-secrets","title":"Where to create the Secrets","text":"It is recommended that you create the secret in the same namespace as your ManagedZones
. In the examples above, we've stored these in a namespace called kuadrant-dns-system
.
Now that the credential is created, we have a DNS provider ready to go and can start using it.
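As a quick sanity check that the Secret exists with the expected type (using the AWS example above):
kubectl get secret my-aws-credentials -n kuadrant-dns-system -o jsonpath='{.type}'\n# kuadrant.io/aws\n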
"},{"location":"dns-operator/docs/provider/#using-a-credential","title":"Using a Credential","text":"Once a Secret
like the one shown above is created, in order for it to be used, it needs to be associated with a ManagedZone
.
See ManagedZone
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/","title":"List of issues","text":" - Re-queue validation intermittently GH-36
- Re-queue DNS Record whenever a write to the Cloud Provider occurs GH-35
- Schedule removal of finalizer from DNS Records GH-38
- Record write attempts in status for current generation GH-34
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#the-idea","title":"The idea","text":"We now will constantly reconcile DNS records. The reasoning is that other controllers may override/change records in the DNS provider so there is a need to requeue the DNS Record from time to time even when no local changes are introduced.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#details","title":"Details","text":"There are a few new fields on the DNS Record status:
- QueuedAt is the time when the DNS Record was received for reconciliation
- QueuedFor is the time when we expect the DNS Record to be reconciled again
- ValidFor indicates the duration since the last reconciliation for which we consider the data in the record to be valid
- WriteCounter represents the number of consecutive write attempts on the same generation of the record. It is reset to 0 when the generation changes or there are no changes to write.
There is an option to override the ValidFor
and DefaultRequeueTime
with valid-for
and requeue-time
flags respectively.
The DefaultRequeueTime
is the duration between successful validation and the next reconciliation to ensure that the record is still up-to-date.
The ValidFor
is used to determine whether we should do a full reconciliation when we get the record. If the record is still valid, we will only update finalizers and validate the record itself; nothing that involves the DNS provider will be performed.
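For illustration, a hypothetical DNSRecord status carrying these fields (values invented; the lower-camel-case names follow the DNSRecord CRD reference):
status:\n  # invented values for illustration\n  queuedAt: \"2024-07-05T10:00:00Z\"\n  queuedFor: \"2024-07-05T10:15:00Z\"\n  validFor: 15m0s\n  writeCounter: 0\n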
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#dns-record-normal-lifecycle","title":"DNS Record normal lifecycle","text":"Once we enqueue the DNS record, controller will compile a list of changes to the DNS provider and will apply it. After this, the record is enqueued with the validationRequeueTime
and the Ready
condition will be marked as false
with a message Awaiting Validation
. When the record is received again and the controller ensures there are no changes needed (the ones applied are present in the DNS Provider) it sets the Ready
condition to true
and enqueues it with the defaultRequeueTime
.
Whenever the record is requeued, we also set the record.Status.QueuedFor
field with a timestamp for when we expect to receive the record again, and on every reconciliation we set the record.Status.QueuedAt
to the time of the reconciliation.
Upon deletion, the process will be similar. The controller will determine the changes needed to the DNS provider and will apply them. The record will be requeued with the validationRequeueTime
. Once we receive it back and ensure that there are no changes needed for the DNS provider we remove the finalizer from the record.
The validationRequeueTime
duration is randomized +/- 50%.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#when-things-go-south","title":"When things go south","text":"If the record is received prematurely - the ValidFor
+ QueuedAt
is more than the current time - we requeue it again for the ValidFor
duration.
When we encounter an error during reconciliation, we will not requeue the record and will put an appropriate error message in the log and on the record. For it to be reconciled again, there must be a change to the DNS Record CR.
It is possible for a user to tamper with the timestamp fields or the ValidFor field. Kubernetes will not allow an invalid value to be set on the timestamp fields. Once the timestamp fields are set manually, reconciliation is triggered, since there is a change in the record CR. The only field that could impact the controller is QueuedAt: the controller will believe it to be the last time the record was reconciled. As for ValidFor: since it is a simple string, it is possible to set an incorrect value. If we fail to parse it, we treat ValidFor as 0. This means that the controller will believe the information in the record is expired and will probe the DNS provider for an update. If a valid value is provided, the controller will obey it. Eventually, the controller will naturally enqueue the record and those values will be overridden.
If the controller fails to retain changes in the DNS provider - writes are successful, but validation fails again - and the WriteCounter reaches the WriteCounterLimit, we give up on the reconciliation. An appropriate message will be put under the Ready - false condition as well as in the logs of the controller. Reconciliation will resume once the generation of the DNS Record is changed.
"},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#metrics","title":"Metrics","text":"There is a metric emitted from the controller: dns_provider_write_counter
. It reflects the WriteCounter
field in the status of the record.
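To eyeball the metric locally, one option is to port-forward the controller and grep its metrics endpoint (a sketch; the metrics port used here, 8080, is an assumption and may differ in your deployment):
kubectl -n dns-operator-system port-forward deployment/dns-operator-controller-manager 8080:8080 &\ncurl -s http://localhost:8080/metrics | grep dns_provider_write_counter\n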
"},{"location":"dns-operator/docs/reference/dnsrecord/","title":"The DNSRecord Custom Resource Definition (CRD)","text":" - DNSRecord
- DNSRecordSpec
- DNSRecordStatus
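Before the field-by-field reference, a minimal DNSRecord sketch built from the fields below (names and targets are hypothetical; the kuadrant.io/v1alpha1 API group is assumed to match the one used by ManagedZone):
# a hypothetical DNSRecord; values are illustrative\napiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: my-record\n  namespace: multi-cluster-gateways\nspec:\n  ownerID: my-owner-id\n  rootHost: test.api.hcpapps.net\n  managedZone:\n    name: my-test-aws-zone\n  endpoints:\n    - dnsName: test.api.hcpapps.net\n      recordType: A\n      recordTTL: 60\n      targets:\n        - 192.0.2.1\n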
"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecord","title":"DNSRecord","text":"Field Type Required Description spec
DNSRecordSpec Yes The specification for DNSRecord custom resource status
DNSRecordStatus No The status for the custom resource"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordspec","title":"DNSRecordSpec","text":"Field Type Required Description ownerID
String Yes Unique string used to identify the owner of this record rootHost
String Yes Single root host of all endpoints in a DNSRecord managedZone
ManagedZoneReference Yes Reference to a ManagedZone instance to which this record will publish its endpoints endpoints
[]ExternalDNS Endpoint No Endpoints to manage in the dns provider healthCheck
HealthCheckSpec No Health check configuration"},{"location":"dns-operator/docs/reference/dnsrecord/#managedzonereference","title":"ManagedZoneReference","text":"Field Type Required Description name
String Yes Name of a managed zone"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description endpoint
String Yes Endpoint is the path to append to the host to reach the expected health check port
Number Yes Port to connect to the host on protocol
String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold
Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordstatus","title":"DNSRecordStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec conditions
[]Kubernetes meta/v1.Condition List of conditions that define the status of the resource queuedAt
Kubernetes meta/v1.Time QueuedAt is a time when DNS record was received for the reconciliation queuedFor
Kubernetes meta/v1.Time QueuedFor is a time when we expect a DNS record to be reconciled again validFor
String ValidFor indicates duration since the last reconciliation we consider data in the record to be valid writeCounter
Number WriteCounter represent a number of consecutive write attempts on the same generation of the record endpoints
[]ExternalDNS Endpoint Endpoints are the last endpoints that were successfully published by the provider healthCheck
HealthCheckStatus Health check status"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the health checks probes
[]HealthCheckStatusProbe Health check Probe status"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatusprobe","title":"HealthCheckStatusProbe","text":"Field Type Description id
String The health check id ipAddress
String The ip address being monitored host
String The host being monitored synced
Boolean Synced conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the probe"},{"location":"dns-operator/docs/reference/managedzone/","title":"The ManagedZone Custom Resource Definition (CRD)","text":" - ManagedZone
- ManagedZoneSpec
- ManagedZoneStatus
"},{"location":"dns-operator/docs/reference/managedzone/#managedzone","title":"ManagedZone","text":"Field Type Required Description spec
ManagedZoneSpec Yes The specification for ManagedZone custom resource status
ManagedZoneStatus No The status for the custom resource"},{"location":"dns-operator/docs/reference/managedzone/#managedzonespec","title":"ManagedZoneSpec","text":"Field Type Required Description id
String No ID is the provider assigned id of this zone (i.e. route53.HostedZone.ID) domainName
String Yes Domain name of this ManagedZone description
String No Description for this ManagedZone parentManagedZone
ManagedZoneReference No Reference to another managed zone that this managed zone belongs to dnsProviderSecretRef
SecretRef No Reference to a secret containing provider credentials"},{"location":"dns-operator/docs/reference/managedzone/#managedzonereference","title":"ManagedZoneReference","text":"Field Type Required Description name
String Yes Name of a managed zone"},{"location":"dns-operator/docs/reference/managedzone/#secretref","title":"SecretRef","text":"Field Type Required Description name
String Yes Name of the secret namespace
String Yes Namespace of the secret"},{"location":"dns-operator/docs/reference/managedzone/#managedzonestatus","title":"ManagedZoneStatus","text":"Field Type Description observedGeneration
String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec conditions
[]Kubernetes meta/v1.Condition List of conditions that define that status of the resource id
String The ID assigned by this provider for this zone (i.e. route53.HostedZone.ID) recordCount
Number The number of records in the provider zone nameServers
[]String The NameServers assigned by the provider for this zone (i.e. route53.DelegationSet.NameServers)"}]}
\ No newline at end of file
diff --git a/0.8.0/sitemap.xml b/0.8.0/sitemap.xml
index cc701dd0..6517cd3e 100644
--- a/0.8.0/sitemap.xml
+++ b/0.8.0/sitemap.xml
@@ -2,717 +2,717 @@
https://docs.kuadrant.io/0.8.0/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/getting-started-multi-cluster-ocm/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/getting-started-multi-cluster/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/getting-started-single-cluster/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/auth/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/development/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/dns/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/dnshealthchecks/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/logging/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/rate-limiting/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/tls/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/install/install-openshift/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/observability/examples/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/observability/metrics/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/observability/tracing/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/authpolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/dnspolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/kuadrant/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/ratelimitpolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/route-selectors/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/reference/tlspolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/auth-for-app-devs-and-platform-engineers/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-for-app-developers/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/authenticated-rl-with-jwt-and-k8s-authnz/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/gateway-dns/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/gateway-rl-for-cluster-operators/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/gateway-tls/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect-single-multi-cluster/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/secure-protect-connect/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/doc/user-guides/simple-rl-for-app-developers/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrant-operator/examples/alerts/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/architecture/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/code_of_conduct/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/contributing/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/features/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/getting-started/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/terminology/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/anonymous-access/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/api-key-authentication/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/authzed/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/caching/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/deny-with-redirect-to-login/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/envoy-jwt-authn-and-authorino/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/external-metadata/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/hello-world/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/host-override/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/http-basic-authentication/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/injecting-data/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/json-pattern-matching-authorization/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/keycloak-authorization-services/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/kubernetes-subjectaccessreview/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/kubernetes-tokenreview/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/mtls-authentication/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/oauth2-token-introspection/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/observability/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/oidc-jwt-authentication/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/oidc-rbac/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/oidc-user-info/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/opa-authorization/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/passing-credentials/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/resource-level-authorization-uma/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/sharding/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/token-normalization/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino/docs/user-guides/validating-webhook/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/authorino-operator/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/doc/how-it-works/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/doc/topologies/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/doc/migrations/conditions/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/doc/server/configuration/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador-server/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador-server/kubernetes/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador-server/sandbox/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador-server/sandbox/redis-otel/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador/limitador-server/sandbox/redis-tls/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/development/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/logging/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/rate-limit-headers/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/resource-requirements/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/storage/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/limitador-operator/doc/tracing/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/contribution/contributing/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/contribution/vscode-debugging/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/gateways/define-and-place-a-gateway/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/gateways/gateway-deletion/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/api-walkthrough/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/metrics-walkthrough/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/multicluster-gateways-walkthrough/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/multicluster-loadbalanced-dnspolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/simple-ratelimitpolicy-for-app-developers/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/how-to/template/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/installation/control-plane-installation/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/installation/service-protection-installation/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/DNSPolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/DNSRecordStructure/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/multiple-dns-provider-support/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/provider-agnostic-dns-health-checks/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/status-aggregation/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/template/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/aws/aws/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/azure/azure/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/proposals/assets/multiple-dns-provider-support/google/google/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/multicluster-gateway-controller/docs/versioning/olm/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/docs/design/architectural-overview-v1/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/docs/design/architectural-overview/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/docs/design/modular_installation/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0001-rlp-v2/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0002-well-known-attributes/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0003-dns-policy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0004-policy-status/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0005-single-cluster-dnspolicy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0006-kuadrant_sub_components_configurations/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0007-policy-sync-v1/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0008-kuadrant-release-process/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/architecture/rfcs/0009-defaults-and-overrides/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/api-quickstart/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/development/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/generate-gateway-api-httproute/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/generate-kuadrant-auth-policy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/kuadrantctl-ci-cd/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/openapi-apicurio/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/openapi-kuadrant-extensions/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/kuadrantctl/doc/openapi-openshift-dev-spaces/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/RELEASE/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/managedzone/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/provider/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/reference/dnsrecord/
- 2024-06-27
+ 2024-07-05
daily
https://docs.kuadrant.io/0.8.0/dns-operator/docs/reference/managedzone/
- 2024-06-27
+ 2024-07-05
daily
\ No newline at end of file
diff --git a/0.8.0/sitemap.xml.gz b/0.8.0/sitemap.xml.gz
index 6d39fdb2..1e602282 100644
Binary files a/0.8.0/sitemap.xml.gz and b/0.8.0/sitemap.xml.gz differ