diff --git a/docs/examples/ferretdb/autoscaling/compute/autoscaler.yaml b/docs/examples/ferretdb/autoscaling/compute/autoscaler.yaml new file mode 100644 index 0000000000..84149d5468 --- /dev/null +++ b/docs/examples/ferretdb/autoscaling/compute/autoscaler.yaml @@ -0,0 +1,21 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: FerretDBAutoscaler +metadata: + name: ferretdb-autoscale-ops + namespace: demo +spec: + databaseRef: + name: ferretdb-autoscale + compute: + ferretdb: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 20 + minAllowed: + cpu: 400m + memory: 400Mi + maxAllowed: + cpu: 1 + memory: 1Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" \ No newline at end of file diff --git a/docs/examples/ferretdb/autoscaling/compute/ferretdb-autoscale.yaml b/docs/examples/ferretdb/autoscaling/compute/ferretdb-autoscale.yaml new file mode 100644 index 0000000000..c33bfa99d7 --- /dev/null +++ b/docs/examples/ferretdb/autoscaling/compute/ferretdb-autoscale.yaml @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1alpha2 +kind: FerretDB +metadata: + name: ferretdb-autoscale + namespace: demo +spec: + version: "1.23.0" + replicas: 1 + backend: + externallyManaged: false + podTemplate: + spec: + containers: + - name: ferretdb + resources: + requests: + cpu: "200m" + memory: "300Mi" + limits: + cpu: "200m" + memory: "300Mi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/ferretdb/monitoring/coreos-prom-fr.yaml b/docs/examples/ferretdb/monitoring/coreos-prom-fr.yaml new file mode 100644 index 0000000000..0be5b868b0 --- /dev/null +++ b/docs/examples/ferretdb/monitoring/coreos-prom-fr.yaml @@ -0,0 +1,24 @@ +apiVersion: kubedb.com/v1alpha2 +kind: FerretDB +metadata: + name: coreos-prom-fr + namespace: demo +spec: + version: "1.23.0" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + backend: + externallyManaged: false + deletionPolicy: WipeOut + replicas: 2 + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s \ No newline at end of file diff --git a/docs/examples/ferretdb/tls/ferretdb-tls.yaml b/docs/examples/ferretdb/tls/ferretdb-tls.yaml new file mode 100644 index 0000000000..1f3e2c1d5b --- /dev/null +++ b/docs/examples/ferretdb/tls/ferretdb-tls.yaml @@ -0,0 +1,25 @@ +apiVersion: kubedb.com/v1alpha2 +kind: FerretDB +metadata: + name: fr-tls + namespace: demo +spec: + version: "1.23.0" + authSecret: + externallyManaged: false + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + backend: + externallyManaged: false + deletionPolicy: WipeOut + replicas: 1 + sslMode: requireSSL + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: ferretdb-ca-issuer \ No newline at end of file diff --git a/docs/examples/ferretdb/tls/issuer.yaml b/docs/examples/ferretdb/tls/issuer.yaml new file mode 100644 index 0000000000..21558c9037 --- /dev/null +++ b/docs/examples/ferretdb/tls/issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: ferretdb-ca-issuer + namespace: demo +spec: + ca: + secretName: ferretdb-ca \ No newline at end of file diff --git a/docs/guides/ferretdb/autoscaler/compute/_index.md b/docs/guides/ferretdb/autoscaler/compute/_index.md new file mode 100644 index 0000000000..073175de71 --- /dev/null +++ 
b/docs/guides/ferretdb/autoscaler/compute/_index.md
@@ -0,0 +1,10 @@
+---
+title: Compute Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: fr-compute-auto-scaling
+    name: Compute Autoscaling
+    parent: fr-auto-scaling
+    weight: 46
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md b/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md
new file mode 100644
index 0000000000..f467188747
--- /dev/null
+++ b/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md
@@ -0,0 +1,406 @@
+---
+title: FerretDB Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: fr-auto-scaling-ferretdb
+    name: ferretdbCompute
+    parent: fr-compute-auto-scaling
+    weight: 15
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Autoscaling the Compute Resource of a FerretDB
+
+This guide will show you how to use `KubeDB` to autoscale the compute resources, i.e., CPU and memory, of a FerretDB.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.
+
+- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+  - [FerretDBAutoscaler](/docs/guides/ferretdb/concepts/autoscaler.md)
+  - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+  - [Compute Resource Autoscaling Overview](/docs/guides/ferretdb/autoscaler/compute/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in the [docs/examples/ferretdb](/docs/examples/ferretdb) directory of the [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Autoscaling of FerretDB
+
+Here, we are going to deploy a standalone `FerretDB` using a version supported by the `KubeDB` operator. The backend Postgres of this FerretDB will be managed internally by KubeDB. You can also use an externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/ferretdb/concepts/appbinding.md) yourself.
+Then we are going to apply a `FerretDBAutoscaler` to set up autoscaling.
+
+#### Deploy FerretDB
+
+In this section, we are going to deploy a FerretDB with version `1.23.0`. Then, in the next section, we will set up autoscaling for it using the `FerretDBAutoscaler` CRD.
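+
+Before creating the CR, you can check which FerretDB versions your KubeDB installation supports (a quick sanity check; the output below is illustrative and depends on the catalog you have installed):
+
+```bash
+$ kubectl get ferretdbversion
+NAME     VERSION   AGE
+1.18.0   1.18.0    3m
+1.23.0   1.23.0    3m
+```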
+Below is the YAML of the `FerretDB` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+  name: ferretdb-autoscale
+  namespace: demo
+spec:
+  version: "1.23.0"
+  replicas: 1
+  backend:
+    externallyManaged: false
+  podTemplate:
+    spec:
+      containers:
+        - name: ferretdb
+          resources:
+            requests:
+              cpu: "200m"
+              memory: "300Mi"
+            limits:
+              cpu: "200m"
+              memory: "300Mi"
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 500Mi
+  deletionPolicy: WipeOut
+```
+
+Let's create the `FerretDB` CRO we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/autoscaling/compute/ferretdb-autoscale.yaml
+ferretdb.kubedb.com/ferretdb-autoscale created
+```
+
+Now, wait until `ferretdb-autoscale` has status `Ready`, i.e.,
+
+```bash
+$ kubectl get fr -n demo
+NAME                 NAMESPACE   VERSION   STATUS   AGE
+ferretdb-autoscale   demo        1.23.0    Ready    6m1s
+```
+
+Let's check the FerretDB resources,
+```bash
+$ kubectl get ferretdb -n demo ferretdb-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources'
+{
+  "limits": {
+    "cpu": "200m",
+    "memory": "300Mi"
+  },
+  "requests": {
+    "cpu": "200m",
+    "memory": "300Mi"
+  }
+}
+```
+
+You can see from the above outputs that the resources are the same as the ones we assigned while deploying the ferretdb.
+
+We are now ready to apply the `FerretDBAutoscaler` CRO to set up autoscaling for this database.
+
+### Compute Resource Autoscaling
+
+Here, we are going to set up compute (CPU and memory) autoscaling using a FerretDBAutoscaler object.
+
+#### Create FerretDBAutoscaler Object
+
+In order to set up compute resource autoscaling for this ferretdb, we have to create a `FerretDBAutoscaler` CRO with our desired configuration. Below is the YAML of the `FerretDBAutoscaler` object that we are going to create,
+
+```yaml
+apiVersion: autoscaling.kubedb.com/v1alpha1
+kind: FerretDBAutoscaler
+metadata:
+  name: ferretdb-autoscale-ops
+  namespace: demo
+spec:
+  databaseRef:
+    name: ferretdb-autoscale
+  compute:
+    ferretdb:
+      trigger: "On"
+      podLifeTimeThreshold: 5m
+      resourceDiffPercentage: 20
+      minAllowed:
+        cpu: 400m
+        memory: 400Mi
+      maxAllowed:
+        cpu: 1
+        memory: 1Gi
+      controlledResources: ["cpu", "memory"]
+      containerControlledValues: "RequestsAndLimits"
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing compute resource autoscaling on `ferretdb-autoscale`.
+- `spec.compute.ferretdb.trigger` specifies that compute resource autoscaling is enabled for this ferretdb.
+- `spec.compute.ferretdb.podLifeTimeThreshold` specifies the minimum lifetime of at least one of the pods before a vertical scaling is initiated.
+- `spec.compute.ferretdb.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%.
+  If the difference between the current and the recommended resources is less than `resourceDiffPercentage`, the Autoscaler operator will skip the update.
+- `spec.compute.ferretdb.minAllowed` specifies the minimum allowed resources for this ferretdb.
+- `spec.compute.ferretdb.maxAllowed` specifies the maximum allowed resources for this ferretdb.
+- `spec.compute.ferretdb.controlledResources` specifies the resources that are controlled by the autoscaler.
+- `spec.compute.ferretdb.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits".
+- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest.
  It has two fields. Learn more about them here: [timeout](/docs/guides/ferretdb/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/ferretdb/concepts/opsrequest.md#specapply).
+
+Let's create the `FerretDBAutoscaler` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/autoscaling/compute/autoscaler.yaml
+ferretdbautoscaler.autoscaling.kubedb.com/ferretdb-autoscale-ops created
+```
+
+#### Verify Autoscaling is set up successfully
+
+Let's check that the `ferretdbautoscaler` resource is created successfully,
+
+```bash
+$ kubectl get ferretdbautoscaler -n demo
+NAME                     AGE
+ferretdb-autoscale-ops   6m55s
+
+$ kubectl describe ferretdbautoscaler ferretdb-autoscale-ops -n demo
+Name:         ferretdb-autoscale-ops
+Namespace:    demo
+Labels:       <none>
+Annotations:  <none>
+API Version:  autoscaling.kubedb.com/v1alpha1
+Kind:         FerretDBAutoscaler
+Metadata:
+  Creation Timestamp:  2024-10-14T08:30:37Z
+  Generation:          1
+  Resource Version:    11066
+  UID:                 62387d58-1cd2-4cb6-9d97-91515531fcea
+Spec:
+  Compute:
+    Ferretdb:
+      Container Controlled Values:  RequestsAndLimits
+      Controlled Resources:
+        cpu
+        memory
+      Max Allowed:
+        Cpu:     1
+        Memory:  1Gi
+      Min Allowed:
+        Cpu:                      400m
+        Memory:                   400Mi
+      Pod Life Time Threshold:    5m
+      Resource Diff Percentage:   20
+      Trigger:                    On
+  Database Ref:
+    Name:  ferretdb-autoscale
+Status:
+  Checkpoints:
+    Cpu Histogram:
+      Bucket Weights:
+        Index:              0
+        Weight:             10000
+      Reference Timestamp:  2024-10-14T08:30:00Z
+      Total Weight:         0.2536082343117003
+    First Sample Start:     2024-10-14T08:31:16Z
+    Last Sample Start:      2024-10-14T08:32:08Z
+    Last Update Time:       2024-10-14T08:32:34Z
+    Memory Histogram:
+      Reference Timestamp:  2024-10-14T08:35:00Z
+    Ref:
+      Container Name:     ferretdb
+      Vpa Object Name:    ferretdb-autoscale
+    Total Samples Count:  2
+    Version:              v3
+  Conditions:
+    Last Transition Time:  2024-10-14T08:32:29Z
+    Message:               Successfully created FerretDBOpsRequest demo/frops-ferretdb-autoscale-5eo9wo
+    Observed Generation:   1
+    Reason:                CreateOpsRequest
+    Status:                True
+    Type:                  CreateOpsRequest
+  Vpas:
+    Conditions:
+      Last Transition Time:  2024-10-14T08:31:34Z
+      Status:                True
+      Type:                  RecommendationProvided
+    Recommendation:
+      Container Recommendations:
+        Container Name:  ferretdb
+        Lower Bound:
+          Cpu:     400m
+          Memory:  400Mi
+        Target:
+          Cpu:     400m
+          Memory:  400Mi
+        Uncapped Target:
+          Cpu:     100m
+          Memory:  262144k
+        Upper Bound:
+          Cpu:     1
+          Memory:  1Gi
+    Vpa Name:      ferretdb-autoscale
+Events:            <none>
+```
+So, the `ferretdbautoscaler` resource is created successfully.
+
+You can see in the `Status.Vpas.Recommendation` section that a recommendation has been generated for our ferretdb. The autoscaler operator continuously watches the generated recommendation and creates a `ferretdbopsrequest` based on it, if the ferretdb pods need to be scaled up or down.
+
+Let's watch the `ferretdbopsrequest` in the demo namespace to see if any `ferretdbopsrequest` object is created. After some time you'll see that a `ferretdbopsrequest` will be created based on the recommendation.
+
+```bash
+$ watch kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME                              TYPE              STATUS        AGE
+frops-ferretdb-autoscale-5eo9wo   VerticalScaling   Progressing   10s
+```
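+
+While the ops request is in progress, you can also inspect the raw recommendation the autoscaler produced (a quick check; the jsonpath below assumes the status layout shown in the describe output above):
+
+```bash
+$ kubectl get ferretdbautoscaler -n demo ferretdb-autoscale-ops \
+    -o jsonpath='{.status.vpas[0].recommendation.containerRecommendations[0].target}'
+{"cpu":"400m","memory":"400Mi"}
+```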
+
+Let's wait for the ops request to become successful.
+
+```bash
+$ watch kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME                              TYPE              STATUS       AGE
+frops-ferretdb-autoscale-5eo9wo   VerticalScaling   Successful   31s
+```
+
+We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest`, we will get an overview of the steps that were followed to scale the ferretdb.
+
+```bash
+$ kubectl describe ferretdbopsrequest -n demo frops-ferretdb-autoscale-5eo9wo
+Name:         frops-ferretdb-autoscale-5eo9wo
+Namespace:    demo
+Labels:       app.kubernetes.io/component=database
+              app.kubernetes.io/instance=ferretdb-autoscale
+              app.kubernetes.io/managed-by=kubedb.com
+              app.kubernetes.io/name=ferretdbs.kubedb.com
+Annotations:  <none>
+API Version:  ops.kubedb.com/v1alpha1
+Kind:         FerretDBOpsRequest
+Metadata:
+  Creation Timestamp:  2024-10-14T08:32:29Z
+  Generation:          1
+  Owner References:
+    API Version:           autoscaling.kubedb.com/v1alpha1
+    Block Owner Deletion:  true
+    Controller:            true
+    Kind:                  FerretDBAutoscaler
+    Name:                  ferretdb-autoscale-ops
+    UID:                   62387d58-1cd2-4cb6-9d97-91515531fcea
+  Resource Version:        11153
+  UID:                     f14acbf1-bd46-4b93-9ee7-d944d9f1f8fd
+Spec:
+  Apply:  IfReady
+  Database Ref:
+    Name:  ferretdb-autoscale
+  Type:    VerticalScaling
+  Vertical Scaling:
+    Node:
+      Resources:
+        Limits:
+          Cpu:     400m
+          Memory:  400Mi
+        Requests:
+          Cpu:     400m
+          Memory:  400Mi
+Status:
+  Conditions:
+    Last Transition Time:  2024-10-14T08:32:29Z
+    Message:               FerretDB ops-request has started to vertically scaling the FerretDB nodes
+    Observed Generation:   1
+    Reason:                VerticalScaling
+    Status:                True
+    Type:                  VerticalScaling
+    Last Transition Time:  2024-10-14T08:32:32Z
+    Message:               Successfully paused database
+    Observed Generation:   1
+    Reason:                DatabasePauseSucceeded
+    Status:                True
+    Type:                  DatabasePauseSucceeded
+    Last Transition Time:  2024-10-14T08:32:32Z
+    Message:               Successfully updated PetSets Resources
+    Observed Generation:   1
+    Reason:                UpdatePetSets
+    Status:                True
+    Type:                  UpdatePetSets
+    Last Transition Time:  2024-10-14T08:32:37Z
+    Message:               get pod; ConditionStatus:True; PodName:ferretdb-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  GetPod--ferretdb-autoscale-0
+    Last Transition Time:  2024-10-14T08:32:37Z
+    Message:               evict pod; ConditionStatus:True; PodName:ferretdb-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  EvictPod--ferretdb-autoscale-0
+    Last Transition Time:  2024-10-14T08:32:42Z
+    Message:               check pod running; ConditionStatus:True; PodName:ferretdb-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  CheckPodRunning--ferretdb-autoscale-0
+    Last Transition Time:  2024-10-14T08:32:47Z
+    Message:               Successfully Restarted Pods With Resources
+    Observed Generation:   1
+    Reason:                RestartPods
+    Status:                True
+    Type:                  RestartPods
+    Last Transition Time:  2024-10-14T08:32:48Z
+    Message:               Successfully completed the VerticalScaling for FerretDB
+    Observed Generation:   1
+    Reason:                Successful
+    Status:                True
+    Type:                  Successful
+  Observed Generation:     1
+  Phase:                   Successful
+Events:
+  Type     Reason         Age    From                         Message
+  ----     ------         ----   ----                         -------
+  Normal   Starting       3m7s   KubeDB Ops-manager Operator  Start processing for FerretDBOpsRequest: demo/frops-ferretdb-autoscale-5eo9wo
+  Normal   Starting       3m7s   KubeDB Ops-manager Operator  Pausing FerretDB database: demo/ferretdb-autoscale
+  Normal   Successful     3m7s   KubeDB Ops-manager Operator  Successfully paused FerretDB database: demo/ferretdb-autoscale for FerretDBOpsRequest: frops-ferretdb-autoscale-5eo9wo
+  Normal   UpdatePetSets  3m4s   KubeDB Ops-manager Operator  Successfully
updated PetSets Resources
+  Warning  get pod; ConditionStatus:True; PodName:ferretdb-autoscale-0            2m59s  KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:ferretdb-autoscale-0
+  Warning  evict pod; ConditionStatus:True; PodName:ferretdb-autoscale-0          2m59s  KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:ferretdb-autoscale-0
+  Warning  check pod running; ConditionStatus:True; PodName:ferretdb-autoscale-0  2m54s  KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:ferretdb-autoscale-0
+  Normal   RestartPods    2m49s  KubeDB Ops-manager Operator  Successfully Restarted Pods With Resources
+  Normal   Starting       2m49s  KubeDB Ops-manager Operator  Resuming FerretDB database: demo/ferretdb-autoscale
+  Normal   Successful     2m48s  KubeDB Ops-manager Operator  Successfully resumed FerretDB database: demo/ferretdb-autoscale for FerretDBOpsRequest: frops-ferretdb-autoscale-5eo9wo
+```
+
+Now, let's verify from the FerretDB YAML that the resources of the ferretdb have been updated to meet the desired state. Let's check,
+
+```bash
+$ kubectl get ferretdb -n demo ferretdb-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources'
+{
+  "limits": {
+    "cpu": "400m",
+    "memory": "400Mi"
+  },
+  "requests": {
+    "cpu": "400m",
+    "memory": "400Mi"
+  }
+}
+```
+
+The above output verifies that we have successfully auto-scaled the resources of the FerretDB.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete fr -n demo ferretdb-autoscale
+kubectl delete ferretdbautoscaler -n demo ferretdb-autoscale-ops
+```
\ No newline at end of file
diff --git a/docs/guides/ferretdb/autoscaler/compute/overview.md b/docs/guides/ferretdb/autoscaler/compute/overview.md
new file mode 100644
index 0000000000..04c9a98999
--- /dev/null
+++ b/docs/guides/ferretdb/autoscaler/compute/overview.md
@@ -0,0 +1,55 @@
+---
+title: FerretDB Compute Autoscaling Overview
+menu:
+  docs_{{ .version }}:
+    identifier: fr-auto-scaling-overview
+    name: Overview
+    parent: fr-compute-auto-scaling
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# FerretDB Compute Resource Autoscaling
+
+This guide will give an overview of how the KubeDB Autoscaler operator autoscales the database compute resources, i.e., CPU and memory, using the `FerretDBAutoscaler` crd.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+  - [FerretDBAutoscaler](/docs/guides/ferretdb/concepts/autoscaler.md)
+  - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+
+## How Compute Autoscaling Works
+
+The following diagram shows how the KubeDB Autoscaler operator autoscales the resources of `FerretDB`. Open the image in a new tab to see the enlarged version.
+
+<figure align="center">
+  <img alt="Compute Auto Scaling process of FerretDB" src="/docs/images/ferretdb/fr-compute-autoscaling.svg">
+<figcaption align="center">Fig: Compute Auto Scaling process of FerretDB</figcaption>
+</figure>
+
+The Auto Scaling process consists of the following steps:
+
+1. At first, a user creates a `FerretDB` Custom Resource Object (CRO).
+
+2. `KubeDB` Provisioner operator watches the `FerretDB` CRO.
+
+3. When the operator finds a `FerretDB` CRO, it creates a `PetSet` and related necessary resources like secrets, services, etc.
+
+4. Then, in order to set up autoscaling of `FerretDB`, the user creates a `FerretDBAutoscaler` CRO with the desired configuration.
+
+5. `KubeDB` Autoscaler operator watches the `FerretDBAutoscaler` CRO.
+
+6. `KubeDB` Autoscaler operator generates recommendations using a modified version of the Kubernetes [official recommender](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler/pkg/recommender) for different components of the database, as specified in the `FerretDBAutoscaler` CRO.
+
+7. If the generated recommendation doesn't match the current resources of the database, then the `KubeDB` Autoscaler operator creates a `FerretDBOpsRequest` CRO to scale the ferretdb to match the generated recommendation.
+
+8. `KubeDB` Ops-manager operator watches the `FerretDBOpsRequest` CRO.
+
+9. Then the `KubeDB` Ops-manager operator will scale the ferretdb vertically as specified in the `FerretDBOpsRequest` CRO.
+
+In the next docs, we are going to show a step-by-step guide on Autoscaling of FerretDB using the `FerretDBAutoscaler` CRD.
diff --git a/docs/guides/ferretdb/monitoring/_index.md b/docs/guides/ferretdb/monitoring/_index.md
new file mode 100644
index 0000000000..cbafc7fe86
--- /dev/null
+++ b/docs/guides/ferretdb/monitoring/_index.md
@@ -0,0 +1,10 @@
+---
+title: Monitoring FerretDB
+menu:
+  docs_{{ .version }}:
+    identifier: fr-monitoring-ferretdb
+    name: Monitoring
+    parent: fr-ferretdb-guides
+    weight: 50
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/monitoring/overview.md b/docs/guides/ferretdb/monitoring/overview.md
new file mode 100644
index 0000000000..6b4c0954d0
--- /dev/null
+++ b/docs/guides/ferretdb/monitoring/overview.md
@@ -0,0 +1,86 @@
+---
+title: FerretDB Monitoring Overview
+description: FerretDB Monitoring Overview
+menu:
+  docs_{{ .version }}:
+    identifier: fr-monitoring-overview
+    name: Overview
+    parent: fr-monitoring-ferretdb
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Monitoring FerretDB with KubeDB
+
+KubeDB has native support for monitoring via [Prometheus](https://prometheus.io/). You can use the builtin [Prometheus](https://github.com/prometheus/prometheus) scraper or the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) to monitor KubeDB managed databases. This tutorial will show you how database monitoring works with KubeDB and how to configure the database crd to enable monitoring.
+
+## Overview
+
+KubeDB uses Prometheus [exporter](https://prometheus.io/docs/instrumenting/exporters/#databases) images to export Prometheus metrics for the respective databases. The following diagram shows the logical flow of database monitoring with KubeDB.
+
+<p align="center">
+  <img alt="Database Monitoring Flow">
+</p>
+
+When a user creates a database crd with the `spec.monitor` section configured, the KubeDB operator provisions the respective database and injects an exporter image as a sidecar to the database pod. It also creates a dedicated stats service with name `{database-crd-name}-stats` for monitoring. Prometheus server can scrape metrics using this stats service.
+
+## Configure Monitoring
+
+In order to enable monitoring for a database, you have to configure the `spec.monitor` section. KubeDB provides the following options to configure the `spec.monitor` section:
+
+| Field                                               | Type       | Uses                                                                                                                                     |
+| --------------------------------------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
+| `spec.monitor.agent`                                | `Required` | Type of the monitoring agent that will be used to monitor this database. It can be `prometheus.io/builtin` or `prometheus.io/operator`. |
+| `spec.monitor.prometheus.exporter.port`             | `Optional` | Port number where the exporter sidecar will serve metrics.                                                                              |
+| `spec.monitor.prometheus.exporter.args`             | `Optional` | Arguments to pass to the exporter sidecar.                                                                                               |
+| `spec.monitor.prometheus.exporter.env`              | `Optional` | List of environment variables to set in the exporter sidecar container.                                                                 |
+| `spec.monitor.prometheus.exporter.resources`        | `Optional` | Resources required by the exporter sidecar container.                                                                                   |
+| `spec.monitor.prometheus.exporter.securityContext`  | `Optional` | Security options the exporter should run with.                                                                                          |
+| `spec.monitor.prometheus.serviceMonitor.labels`     | `Optional` | Labels for the `ServiceMonitor` crd.                                                                                                    |
+| `spec.monitor.prometheus.serviceMonitor.interval`   | `Optional` | Interval at which metrics should be scraped.                                                                                            |
+
+## Sample Configuration
+
+A sample YAML for a FerretDB crd with the `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below.
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+  name: sample-ferretdb
+  namespace: databases
+spec:
+  version: "1.23.0"
+  deletionPolicy: WipeOut
+  backend:
+    externallyManaged: true
+    postgresRef:
+      name: ha-postgres
+      namespace: demo
+  monitor:
+    agent: prometheus.io/operator
+    prometheus:
+      serviceMonitor:
+        labels:
+          release: prometheus
+      exporter:
+        resources:
+          requests:
+            memory: 512Mi
+            cpu: 200m
+          limits:
+            memory: 512Mi
+            cpu: 250m
+        securityContext:
+          runAsUser: 70
+          allowPrivilegeEscalation: false
+```
+
+Here, we have specified that we are going to monitor this server using the Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in the `databases` namespace and this `ServiceMonitor` will have the `release: prometheus` label.
+
+## Next Steps
+
+- Learn how to monitor FerretDB database with KubeDB using [builtin-Prometheus](/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md)
+- Learn how to monitor FerretDB database with KubeDB using [Prometheus operator](/docs/guides/ferretdb/monitoring/using-prometheus-operator.md).
+
diff --git a/docs/guides/ferretdb/monitoring/using-prometheus-operator.md b/docs/guides/ferretdb/monitoring/using-prometheus-operator.md
new file mode 100644
index 0000000000..303a39920d
--- /dev/null
+++ b/docs/guides/ferretdb/monitoring/using-prometheus-operator.md
@@ -0,0 +1,363 @@
+---
+title: Monitor FerretDB using Prometheus Operator
+menu:
+  docs_{{ .version }}:
+    identifier: fr-using-prometheus-operator-monitoring
+    name: Prometheus Operator
+    parent: fr-monitoring-ferretdb
+    weight: 15
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Monitoring FerretDB Using Prometheus operator
+
+[Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) provides a simple and Kubernetes-native way to deploy and configure a Prometheus server. This tutorial will show you how to use the Prometheus operator to monitor a FerretDB database deployed with KubeDB.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/ferretdb/monitoring/overview.md).
+
+- We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, you can deploy one using the helm chart [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack).
+
+- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy the prometheus operator helm chart. We are going to deploy the database in the `demo` namespace.
+
+  ```bash
+  $ kubectl create ns monitoring
+  namespace/monitoring created
+
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: YAML files used in this tutorial are stored in the [docs/examples/ferretdb](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/ferretdb) folder in the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Find out required labels for ServiceMonitor
+
+We need to know the labels used to select `ServiceMonitor` by a `Prometheus` crd. We are going to provide these labels in the `spec.monitor.prometheus.serviceMonitor.labels` field of the FerretDB crd so that KubeDB creates the `ServiceMonitor` object accordingly.
+
+At first, let's find out the available Prometheus server in our cluster.
+
+```bash
+$ kubectl get prometheus --all-namespaces
+NAMESPACE    NAME                                    VERSION   DESIRED   READY   RECONCILED   AVAILABLE   AGE
+monitoring   prometheus-kube-prometheus-prometheus   v2.54.1   1         1       True         True        13m
+```
+
+> If you don't have any Prometheus server running in your cluster, deploy one following the guide specified in the **Before You Begin** section.
+
+Now, let's view the YAML of the available Prometheus server `prometheus` in the `monitoring` namespace.
+```bash
+$ kubectl get prometheus -n monitoring prometheus-kube-prometheus-prometheus -o yaml
+```
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  annotations:
+    meta.helm.sh/release-name: prometheus
+    meta.helm.sh/release-namespace: monitoring
+  creationTimestamp: "2024-10-14T17:17:25Z"
+  generation: 1
+  labels:
+    app: kube-prometheus-stack-prometheus
+    app.kubernetes.io/instance: prometheus
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: kube-prometheus-stack
+    app.kubernetes.io/version: 65.2.0
+    chart: kube-prometheus-stack-65.2.0
+    heritage: Helm
+    release: prometheus
+  name: prometheus-kube-prometheus-prometheus
+  namespace: monitoring
+  resourceVersion: "58118"
+  uid: b1bf237b-2fdc-459c-b92f-e087a1119f33
+spec:
+  alerting:
+    alertmanagers:
+    - apiVersion: v2
+      name: prometheus-kube-prometheus-alertmanager
+      namespace: monitoring
+      pathPrefix: /
+      port: http-web
+  automountServiceAccountToken: true
+  enableAdminAPI: false
+  evaluationInterval: 30s
+  externalUrl: http://prometheus-kube-prometheus-prometheus.monitoring:9090
+  hostNetwork: false
+  image: quay.io/prometheus/prometheus:v2.54.1
+  listenLocal: false
+  logFormat: logfmt
+  logLevel: info
+  paused: false
+  podMonitorNamespaceSelector: {}
+  podMonitorSelector:
+    matchLabels:
+      release: prometheus
+  portName: http-web
+  probeNamespaceSelector: {}
+  probeSelector:
+    matchLabels:
+      release: prometheus
+  replicas: 1
+  retention: 10d
+  routePrefix: /
+  ruleNamespaceSelector: {}
+  ruleSelector:
+    matchLabels:
+      release: prometheus
+  scrapeConfigNamespaceSelector: {}
+  scrapeConfigSelector:
+    matchLabels:
+      release: prometheus
+  scrapeInterval: 30s
+  securityContext:
+    fsGroup: 2000
+    runAsGroup: 2000
+    runAsNonRoot: true
+    runAsUser: 1000
+    seccompProfile:
+      type: RuntimeDefault
+  serviceAccountName: prometheus-kube-prometheus-prometheus
+  serviceMonitorNamespaceSelector: {}
+  serviceMonitorSelector:
+    matchLabels:
+      release: prometheus
+  shards: 1
+  tsdb:
+    outOfOrderTimeWindow: 0s
+  version: v2.54.1
+  walCompression: true
+status:
+  availableReplicas: 1
+  conditions:
+  - lastTransitionTime: "2024-10-14T17:27:17Z"
+    message: ""
+    observedGeneration: 1
+    reason: ""
+    status: "True"
+    type: Available
+  - lastTransitionTime: "2024-10-14T17:27:17Z"
+    message: ""
+    observedGeneration: 1
+    reason: ""
+    status: "True"
+    type: Reconciled
+  paused: false
+  replicas: 1
+  selector: app.kubernetes.io/instance=prometheus-kube-prometheus-prometheus,app.kubernetes.io/managed-by=prometheus-operator,app.kubernetes.io/name=prometheus,operator.prometheus.io/name=prometheus-kube-prometheus-prometheus,prometheus=prometheus-kube-prometheus-prometheus
+  shardStatuses:
+  - availableReplicas: 1
+    replicas: 1
+    shardID: "0"
+    unavailableReplicas: 0
+    updatedReplicas: 1
+  shards: 1
+  unavailableReplicas: 0
+  updatedReplicas: 1
+```
+
+Notice the `spec.serviceMonitorSelector` section. Here, the `release: prometheus` label is used to select `ServiceMonitor` crds. So, we are going to use this label in the `spec.monitor.prometheus.serviceMonitor.labels` field of the FerretDB crd.
+
+## Deploy FerretDB with Monitoring Enabled
+
+At first, let's deploy a FerretDB database with monitoring enabled. Below is the FerretDB object that we are going to create.
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+  name: coreos-prom-fr
+  namespace: demo
+spec:
+  version: "1.23.0"
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 500Mi
+  backend:
+    externallyManaged: false
+  deletionPolicy: WipeOut
+  replicas: 2
+  monitor:
+    agent: prometheus.io/operator
+    prometheus:
+      serviceMonitor:
+        labels:
+          release: prometheus
+        interval: 10s
+```
+
+Here,
+
+- `monitor.agent: prometheus.io/operator` indicates that we are going to monitor this server using the Prometheus operator.
+- `monitor.prometheus.serviceMonitor.labels` specifies that KubeDB should create the `ServiceMonitor` with these labels.
+- `monitor.prometheus.interval` indicates that the Prometheus server should scrape metrics from this database at a 10-second interval.
+
+Let's create the FerretDB object that we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/monitoring/coreos-prom-fr.yaml
+ferretdb.kubedb.com/coreos-prom-fr created
+```
+
+Now, wait for the database to go into the `Ready` state.
+
+```bash
+$ kubectl get fr -n demo coreos-prom-fr
+NAME             NAMESPACE   VERSION   STATUS   AGE
+coreos-prom-fr   demo        1.23.0    Ready    111s
+```
+
+KubeDB will create a separate stats service with name `{FerretDB crd name}-stats` for monitoring purposes.
+
+```bash
+$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=coreos-prom-fr"
+NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)     AGE
+coreos-prom-fr         ClusterIP   10.96.234.253   <none>        27017/TCP   2m16s
+coreos-prom-fr-stats   ClusterIP   10.96.27.143    <none>        56790/TCP   2m16s
+```
+
+Here, the `coreos-prom-fr-stats` service has been created for monitoring purposes.
+
+Let's describe this stats service.
+
+```bash
+$ kubectl describe svc -n demo coreos-prom-fr-stats
+```
+```yaml
+Name:              coreos-prom-fr-stats
+Namespace:         demo
+Labels:            app.kubernetes.io/component=database
+                   app.kubernetes.io/instance=coreos-prom-fr
+                   app.kubernetes.io/managed-by=kubedb.com
+                   app.kubernetes.io/name=ferretdbs.kubedb.com
+                   kubedb.com/role=stats
+Annotations:       monitoring.appscode.com/agent: prometheus.io/operator
+Selector:          app.kubernetes.io/instance=coreos-prom-fr,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=ferretdbs.kubedb.com
+Type:              ClusterIP
+IP Family Policy:  SingleStack
+IP Families:       IPv4
+IP:                10.96.27.143
+IPs:               10.96.27.143
+Port:              metrics  56790/TCP
+TargetPort:        metrics/TCP
+Endpoints:         10.244.0.59:8080,10.244.0.60:8080
+Session Affinity:  None
+Events:            <none>
+```
+
+Notice the `Labels` and `Port` fields. The `ServiceMonitor` will use this information to target its endpoints.
+
+KubeDB will also create a `ServiceMonitor` crd in the `demo` namespace that selects the endpoints of the `coreos-prom-fr-stats` service. Verify that the `ServiceMonitor` crd has been created.
+
+```bash
+$ kubectl get servicemonitor -n demo
+NAME                              AGE
+coreos-prom-fr-pg-backend-stats   3m33s
+coreos-prom-fr-stats              2m24s
+```
+
+> If the backend Postgres is managed by KubeDB, the KubeDB operator will also enable monitoring for the backend Postgres. That's why the `coreos-prom-fr-pg-backend-stats` `ServiceMonitor` was also created.
For more details on monitoring KubeDB-managed Postgres, see [this documentation](/docs/guides/postgres/monitoring/using-prometheus-operator.md).
+
+Let's verify that the `ServiceMonitor` has the label that we had specified in the `spec.monitor` section of the FerretDB crd.
+
+```bash
+$ kubectl get servicemonitor -n demo coreos-prom-fr-stats -o yaml
+```
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  creationTimestamp: "2024-10-14T17:35:38Z"
+  generation: 1
+  labels:
+    app.kubernetes.io/component: database
+    app.kubernetes.io/instance: coreos-prom-fr
+    app.kubernetes.io/managed-by: kubedb.com
+    app.kubernetes.io/name: ferretdbs.kubedb.com
+    release: prometheus
+  name: coreos-prom-fr-stats
+  namespace: demo
+  ownerReferences:
+  - apiVersion: v1
+    blockOwnerDeletion: true
+    controller: true
+    kind: Service
+    name: coreos-prom-fr-stats
+    uid: d0811d68-6e31-4357-b35a-8a7793ab4918
+  resourceVersion: "59094"
+  uid: cfa29869-8000-44fe-bc9b-e7e78b08da36
+spec:
+  endpoints:
+  - honorLabels: true
+    interval: 10s
+    path: /debug/metrics
+    port: metrics
+  namespaceSelector:
+    matchNames:
+    - demo
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: database
+      app.kubernetes.io/instance: coreos-prom-fr
+      app.kubernetes.io/managed-by: kubedb.com
+      app.kubernetes.io/name: ferretdbs.kubedb.com
+      kubedb.com/role: stats
+```
+
+Notice that the `ServiceMonitor` has the label `release: prometheus` that we had specified in the FerretDB crd.
+
+Also notice that the `ServiceMonitor` has a selector which matches the labels we have seen in the `coreos-prom-fr-stats` service. It also targets the `metrics` port that we have seen in the stats service.
+
+## Verify Monitoring Metrics
+
+At first, let's find out the respective Prometheus pod for the `prometheus` Prometheus server.
+
+```bash
+$ kubectl get pod -n monitoring -l=app.kubernetes.io/name=prometheus
+NAME                                                 READY   STATUS    RESTARTS   AGE
+prometheus-prometheus-kube-prometheus-prometheus-0   2/2     Running   0          27m
+```
+
+The Prometheus server is listening on port `9090` of the `prometheus-prometheus-kube-prometheus-prometheus-0` pod. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access the Prometheus dashboard.
+
+Run the following command in a separate terminal to forward port 9090 of the `prometheus-prometheus-kube-prometheus-prometheus-0` pod,
+
+```bash
+$ kubectl port-forward -n monitoring prometheus-prometheus-kube-prometheus-prometheus-0 9090
+Forwarding from 127.0.0.1:9090 -> 9090
+Forwarding from [::1]:9090 -> 9090
+```
+
+Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see the `metrics` endpoint of the `coreos-prom-fr-stats` service as one of the targets.
+
+<p align="center">
+  <img alt="Prometheus Target">
+</p>
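+
+If you prefer the command line, you can also hit the stats endpoint directly. Below is a quick sketch that reuses the service name, port and metrics path we saw in the `ServiceMonitor` above:
+
+```bash
+$ kubectl port-forward -n demo svc/coreos-prom-fr-stats 56790
+# in a separate terminal
+$ curl -s localhost:56790/debug/metrics | head
+```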
+
+Back in the Prometheus UI, check the `endpoint` and `service` labels marked by the red rectangles. They verify that the target is our expected database. Now, you can view the collected metrics and create a graph from the homepage of this Prometheus dashboard. You can also use this Prometheus server as a data source for [Grafana](https://grafana.com/) and create a beautiful dashboard with the collected metrics.
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run the following commands:
+
+```bash
+kubectl delete -n demo fr/coreos-prom-fr
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Monitor your FerretDB database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md).
+- Detail concepts of [FerretDB object](/docs/guides/ferretdb/concepts/ferretdb.md).
+- Detail concepts of [FerretDBVersion object](/docs/guides/ferretdb/concepts/catalog.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/ferretdb/quickstart/quickstart.md b/docs/guides/ferretdb/quickstart/quickstart.md
index d1b66499ef..1b9ae049cc 100644
--- a/docs/guides/ferretdb/quickstart/quickstart.md
+++ b/docs/guides/ferretdb/quickstart/quickstart.md
@@ -136,7 +136,7 @@ Metadata:
   UID:               73247297-139b-4dfe-8f9d-9baf2b092364
 Spec:
   Auth Secret:
-    Name:  ferret-pg-backend-auth
+    Name:  ferret-auth
   Backend:
     Externally Managed:  false
     Linked DB:           ferretdb
@@ -222,9 +222,6 @@ Status:
 $ kubectl get petset -n demo
 NAME     READY   AGE
 ferret   1/1     29m
-
-$ kubectl get petset -n demo
-NAME     READY   AGE
 ferret-pg-backend           2/2     30m
 ferret-pg-backend-arbiter   1/1     29m
@@ -273,7 +270,7 @@ metadata:
   uid: 73247297-139b-4dfe-8f9d-9baf2b092364
 spec:
   authSecret:
-    name: ferret-pg-backend-auth
+    name: ferret-auth
   backend:
     externallyManaged: false
     linkedDB: ferretdb
@@ -355,16 +352,16 @@ status:
   phase: Ready
 ```
 
-Please note that KubeDB operator has created a new Secret called `ferret-pg-backend-auth` *(format: {ferretdb-object-name}-backend-auth)* for storing the password for `postgres` superuser. This secret contains a `username` key which contains the *username* for FerretDB superuser and a `password` key which contains the *password* for FerretDB superuser.
+Please note that KubeDB operator has created a new Secret called `ferret-auth` *(format: {ferretdb-object-name}-auth)* for storing the password for `postgres` superuser. This secret contains a `username` key which contains the *username* for FerretDB superuser and a `password` key which contains the *password* for FerretDB superuser.
 
 If you want to use custom or existing secret please specify that when creating the FerretDB object using `spec.authSecret.name`. While creating this secret manually, make sure the secret contains these two keys containing data `username` and `password`. For more details, please see [here](/docs/guides/mongodb/concepts/mongodb.md#specauthsecret).
 Now, you can connect to this database by port-forwarding primary service `ferret` and connecting with [mongo-shell](https://www.mongodb.com/try/download/shell) locally
 
 ```bash
-$ kubectl get secrets -n demo ferret-pg-backend-auth -o jsonpath='{.data.\username}' | base64 -d
+$ kubectl get secrets -n demo ferret-auth -o jsonpath='{.data.\username}' | base64 -d
 postgres
-$ kubectl get secrets -n demo ferret-pg-backend-auth -o jsonpath='{.data.\\password}' | base64 -d
+$ kubectl get secrets -n demo ferret-auth -o jsonpath='{.data.\\password}' | base64 -d
 UxV5a35kURSFE(;5
 
 $ kubectl port-forward svc/ferret -n demo 27017
diff --git a/docs/guides/ferretdb/tls/_index.md b/docs/guides/ferretdb/tls/_index.md
new file mode 100644
index 0000000000..aabd98c8c5
--- /dev/null
+++ b/docs/guides/ferretdb/tls/_index.md
@@ -0,0 +1,10 @@
+---
+title: Run FerretDB with TLS
+menu:
+  docs_{{ .version }}:
+    identifier: fr-tls
+    name: TLS/SSL Encryption
+    parent: fr-ferretdb-guides
+    weight: 45
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/ferretdb/tls/configure_tls.md b/docs/guides/ferretdb/tls/configure_tls.md
new file mode 100644
index 0000000000..fbcb9a6d04
--- /dev/null
+++ b/docs/guides/ferretdb/tls/configure_tls.md
@@ -0,0 +1,244 @@
+---
+title: FerretDB TLS/SSL Encryption
+menu:
+  docs_{{ .version }}:
+    identifier: fr-tls-configure
+    name: FerretDB_SSL
+    parent: fr-tls
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Run FerretDB with TLS/SSL (Transport Encryption)
+
+KubeDB supports providing TLS/SSL encryption (via `sslMode`) for FerretDB. This tutorial will show you how to use KubeDB to run a FerretDB database with TLS/SSL encryption.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install [`cert-manager`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates.
+
+- Now, install the KubeDB cli on your workstation and the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+  ```bash
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: YAML files used in this tutorial are stored in the [docs/examples/ferretdb](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/ferretdb) folder in the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Overview
+
+KubeDB uses the following crd fields to enable SSL/TLS encryption in FerretDB.
+
+- `spec:`
+  - `sslMode`
+  - `tls:`
+    - `issuerRef`
+    - `certificate`
+
+Read about the fields in detail in the [ferretdb concept](/docs/guides/ferretdb/concepts/ferretdb.md),
+
+`sslMode` enables TLS/SSL or mixed TLS/SSL used for all network connections. The value of the `sslMode` field can be one of the following:
+
+|    Value     | Description                                                     |
+| :----------: | :-------------------------------------------------------------- |
+|  `disabled`  | The server does not use TLS/SSL.                                |
+| `requireSSL` | The server uses and accepts only TLS/SSL encrypted connections. |
+
+The specified SSL mode will be used by the health checker and the exporter of FerretDB.
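+
+For orientation, here is a minimal sketch of how these fields fit together in a `FerretDB` spec (the issuer name matches the one we create below; the full example appears later in this guide):
+
+```yaml
+spec:
+  sslMode: requireSSL
+  tls:
+    issuerRef:
+      apiGroup: cert-manager.io
+      kind: Issuer
+      name: ferretdb-ca-issuer
+```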
+
+When `sslMode` is anything other than `disabled`, users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate`, to generate certificate secrets. These certificate secrets are then used to generate the required certificates including `ca.crt`, `tls.crt` and `tls.key`.
+
+## Create Issuer/ClusterIssuer
+
+We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in FerretDB. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`.
+
+- Start off by generating your CA certificate using openssl.
+
+```bash
+openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ferretdb/O=kubedb"
+```
+
+- Now create a ca-secret using the certificate files you have just generated.
+
+```bash
+kubectl create secret tls ferretdb-ca \
+  --cert=ca.crt \
+  --key=ca.key \
+  --namespace=demo
+```
+
+Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: ferretdb-ca-issuer
+  namespace: demo
+spec:
+  ca:
+    secretName: ferretdb-ca
+```
+
+Apply the `YAML` file:
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/tls/issuer.yaml
+issuer.cert-manager.io/ferretdb-ca-issuer created
+```
+
+## TLS/SSL encryption in FerretDB
+
+Below is the YAML for FerretDB with TLS enabled. The backend Postgres will be automatically managed by KubeDB:
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+  name: fr-tls
+  namespace: demo
+spec:
+  version: "1.23.0"
+  authSecret:
+    externallyManaged: false
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 500Mi
+  backend:
+    externallyManaged: false
+  deletionPolicy: WipeOut
+  replicas: 1
+  sslMode: requireSSL
+  tls:
+    issuerRef:
+      apiGroup: "cert-manager.io"
+      kind: Issuer
+      name: ferretdb-ca-issuer
+```
+
+### Deploy FerretDB
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/tls/ferretdb-tls.yaml
+ferretdb.kubedb.com/fr-tls created
+```
+
+Now, wait until `fr-tls` has status `Ready`, i.e.,
+
+```bash
+$ watch kubectl get fr -n demo
+Every 2.0s: kubectl get ferretdb -n demo
+NAME     TYPE                  VERSION   STATUS   AGE
+fr-tls   kubedb.com/v1alpha2   1.23.0    Ready    60s
+```
+
+### Verify TLS/SSL in FerretDB
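+
+Before connecting, a quick sanity check that the CR carries the intended SSL mode (a sketch; the jsonpath simply reads back the spec we applied):
+
+```bash
+$ kubectl get fr -n demo fr-tls -o jsonpath='{.spec.sslMode}'
+requireSSL
+```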
+
+Now, connect to this database through [mongosh](https://www.mongodb.com/docs/mongodb-shell/) and verify that `sslMode` has been set up as intended (i.e., `requireSSL`). KubeDB has created a client certificate secret for this database; let's describe it,
+
+```bash
+$ kubectl describe secret -n demo fr-tls-client-cert
+Name:         fr-tls-client-cert
+Namespace:    demo
+Labels:       app.kubernetes.io/component=database
+              app.kubernetes.io/instance=fr-tls
+              app.kubernetes.io/managed-by=kubedb.com
+              app.kubernetes.io/name=ferretdbs.kubedb.com
+              controller.cert-manager.io/fao=true
+Annotations:  cert-manager.io/alt-names:
+              cert-manager.io/certificate-name: fr-tls-client-cert
+              cert-manager.io/common-name: fr-tls
+              cert-manager.io/ip-sans:
+              cert-manager.io/issuer-group: cert-manager.io
+              cert-manager.io/issuer-kind: Issuer
+              cert-manager.io/issuer-name: ferretdb-ca-issuer
+              cert-manager.io/subject-organizationalunits: client
+              cert-manager.io/subject-organizations: kubedb
+              cert-manager.io/uri-sans:
+
+Type:  kubernetes.io/tls
+
+Data
+====
+ca.crt:   1155 bytes
+tls.crt:  1176 bytes
+tls.key:  1679 bytes
+```
+
+Now we need to save the client cert and key to two different files and combine them into a pem file.
+Additionally, to verify the server, we need to store the ca.crt.
+
+```bash
+$ kubectl get secrets -n demo fr-tls-client-cert -o jsonpath='{.data.tls\.crt}' | base64 -d > client.crt
+$ kubectl get secrets -n demo fr-tls-client-cert -o jsonpath='{.data.tls\.key}' | base64 -d > client.key
+$ kubectl get secrets -n demo fr-tls-client-cert -o jsonpath='{.data.ca\.crt}' | base64 -d > ca.crt
+$ cat client.crt client.key > client.pem
+```
+
+Now, we can connect to our FerretDB with these files using the mongosh client.
+
+```bash
+$ kubectl get secrets -n demo fr-tls-auth -o jsonpath='{.data.\username}' | base64 -d
+postgres
+$ kubectl get secrets -n demo fr-tls-auth -o jsonpath='{.data.\\password}' | base64 -d
+l*jGp8u*El8WRSDJ
+
+$ kubectl port-forward svc/fr-tls -n demo 27017
+Forwarding from 127.0.0.1:27017 -> 27018
+Forwarding from [::1]:27017 -> 27018
+Handling connection for 27017
+Handling connection for 27017
+```
+
+Now in another terminal
+
+```bash
+$ mongosh 'mongodb://postgres:l*jGp8u*El8WRSDJ@localhost:27017/ferretdb?authMechanism=PLAIN&tls=true&tlsCertificateKeyFile=./client.pem&tlsCaFile=./ca.crt'
+Current Mongosh Log ID: 65efeea2a3347fff66d04c70
+Connecting to:          mongodb://<credentials>@localhost:27017/ferretdb?authMechanism=PLAIN&directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5
+Using MongoDB:          7.0.42
+Using Mongosh:          2.1.5
+
+For mongosh info see: https://docs.mongodb.com/mongodb-shell/
+
+------
+   The server generated these startup warnings when booting
+   2024-03-12T05:56:50.979Z: Powered by FerretDB v1.18.0 and PostgreSQL 13.13 on x86_64-pc-linux-musl, compiled by gcc.
+   2024-03-12T05:56:50.979Z: Please star us on GitHub: https://github.com/FerretDB/FerretDB.
+   2024-03-12T05:56:50.979Z: The telemetry state is undecided.
+   2024-03-12T05:56:50.979Z: Read more about FerretDB telemetry and how to opt out at https://beacon.ferretdb.io.
+------
+
+ferretdb>
+```
+
+So our connection is now TLS encrypted.
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete ferretdb -n demo fr-tls
+kubectl delete issuer -n demo ferretdb-ca-issuer
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [FerretDB object](/docs/guides/ferretdb/concepts/ferretdb.md).
+- Detail concepts of [FerretDBVersion object](/docs/guides/ferretdb/concepts/catalog.md).
+- Monitor your FerretDB database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/ferretdb/monitoring/using-prometheus-operator.md).
+- Monitor your FerretDB database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/ferretdb/tls/overview.md b/docs/guides/ferretdb/tls/overview.md
new file mode 100644
index 0000000000..09f50ed00d
--- /dev/null
+++ b/docs/guides/ferretdb/tls/overview.md
@@ -0,0 +1,70 @@
+---
+title: FerretDB TLS/SSL Encryption Overview
+menu:
+  docs_{{ .version }}:
+    identifier: fr-tls-overview
+    name: Overview
+    parent: fr-tls
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# FerretDB TLS/SSL Encryption
+
+**Prerequisite:** To configure TLS/SSL in `FerretDB`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster, follow the steps [here](https://cert-manager.io/docs/installation/kubernetes/).
+
+To issue a certificate, the following crds of `cert-manager` are used:
+
+- `Issuer/ClusterIssuer`: Issuers and ClusterIssuers represent certificate authorities (CAs) that are able to generate signed certificates by honoring certificate signing requests. All cert-manager certificates require a referenced issuer that is in a ready condition to attempt to honor the request. You can learn more details [here](https://cert-manager.io/docs/concepts/issuer/).
+
+- `Certificate`: `cert-manager` has the concept of Certificates that define a desired x509 certificate which will be renewed and kept up to date. You can learn more details [here](https://cert-manager.io/docs/concepts/certificate/).
+
+**FerretDB CRD Specification:**
+
+KubeDB uses the following crd fields to enable SSL/TLS encryption in `FerretDB`.
+
+- `spec:`
+  - `sslMode`
+  - `tls:`
+    - `issuerRef`
+    - `certificates`
+  - `clientAuthMode`
+
+Read about the fields in detail in the [ferretdb concept](/docs/guides/ferretdb/concepts/ferretdb.md).
+
+When `sslMode` is set to `requireSSL`, the users must specify the `tls.issuerRef` field. `KubeDB` uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate`, to generate certificate secrets using the `Issuer/ClusterIssuer` specification. These certificate secrets, including `ca.crt`, `tls.crt` and `tls.key`, are used to configure the `FerretDB` server, exporter, etc.
+
+## How TLS/SSL configures in FerretDB
+
+The following figure shows how `KubeDB` is used to configure TLS/SSL in FerretDB. Open the image in a new tab to see the enlarged version.
+<figure align="center">
+  <img alt="Deploy FerretDB with TLS/SSL" src="/docs/images/ferretdb/fr-tls.svg">
+<figcaption align="center">Fig: Deploy FerretDB with TLS/SSL</figcaption>
+</figure>
+
+The process of deploying FerretDB with TLS/SSL configuration consists of the following steps:
+
+1. At first, a user creates an `Issuer/ClusterIssuer` cr.
+
+2. Then the user creates a `FerretDB` cr which refers to the `Issuer/ClusterIssuer` cr that the user created in the previous step.
+
+3. `KubeDB` Provisioner operator watches for the `FerretDB` cr.
+
+4. When it finds one, it creates `Secret`, `Service`, etc. for the `FerretDB` database.
+
+5. `KubeDB` Ops-manager operator watches for `FerretDB`(5c), `Issuer/ClusterIssuer`(5b), `Secret` and `Service`(5a).
+
+6. When it finds all the resources (`FerretDB`, `Issuer/ClusterIssuer`, `Secret`, `Service`), it creates `Certificates` by using the `tls.issuerRef` and `tls.certificates` field specifications from the `FerretDB` cr.
+
+7. `cert-manager` watches for certificates.
+
+8. When it finds one, it creates certificate secrets `tls-secrets` (server, client, exporter secrets etc.) that hold the actual certificate signed by the CA.
+
+9. `KubeDB` Provisioner operator watches for the certificate secrets `tls-secrets`.
+
+10. When it finds all the tls-secrets, it creates the related `PetSets` so that the FerretDB database can be configured with TLS/SSL.
+
+In the next doc, we are going to show a step-by-step guide on how to configure a `FerretDB` database with TLS/SSL.
\ No newline at end of file
diff --git a/docs/images/ferretdb/fr-compute-autoscaling.svg b/docs/images/ferretdb/fr-compute-autoscaling.svg
new file mode 100644
index 0000000000..9a7e057654
--- /dev/null
+++ b/docs/images/ferretdb/fr-compute-autoscaling.svg
@@ -0,0 +1,4 @@
+
+
+
+
PetSet's Pods
PetSet's Pods
FerretDB CR
1. Create
1. Create
Updated PetSet's Pods
Updated PetSet's Pods
FerretDB compute AutoScaler
FerretDB compute AutoSc...
KubeDB Provisioner Operator
KubeDB Provisioner O...
2. Watch
2. Watch
3. Create
3. Create
KubeDB AutoScaler Operator
KubeDB AutoScaler Op...
5. Watch
5. Watch
FerretDB OpsRequest
FerretDB OpsRequest
7. Create
7. Create
KubeDB OpsManager Operator
KubeDB OpsManager Op...
refers to
refers to
refers to
refers to
Recommendation
Recommendation
6. generate
6. generate
4. Create
4. Create
8. Watch
8. Watch
9. Scale
resources
9. Scale...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/images/ferretdb/fr-tls.svg b/docs/images/ferretdb/fr-tls.svg new file mode 100644 index 0000000000..de4e284ced --- /dev/null +++ b/docs/images/ferretdb/fr-tls.svg @@ -0,0 +1,4 @@ + + + +            Enterprise            Operator              Community            Operator
service
se...
secret
se...
tls-secret
tls-secret
Cert- manager
Cert- ma...
PetSet
Statef...
Issuer/Cluster Issuer
Issuer...
FerretDB
FerretDB
Certificates
Certif...
User
User
2.Create
2.Create
1.Create
1.Create
5a.Watch
5a.Watch
3.Watch
3.Watch
4.Create
4.Create
5c.Watch
5c.Watch
6.Create
6.Create
7.Watch
7.Watch
uses
uses
8.Create
8.Create
9.Watch
9.Watch
10.Create
10.Create
5b.Watch
5b.Watch
refers to
refers to
Text is not SVG - cannot display
\ No newline at end of file