diff --git a/docs/examples/ferretdb/monitoring/builtin-prom-fr.yaml b/docs/examples/ferretdb/monitoring/builtin-prom-fr.yaml
index df84bc129a..df3b0d3f31 100644
--- a/docs/examples/ferretdb/monitoring/builtin-prom-fr.yaml
+++ b/docs/examples/ferretdb/monitoring/builtin-prom-fr.yaml
@@ -4,7 +4,7 @@ metadata:
name: builtin-prom-fr
namespace: demo
spec:
- version: "1.18.0"
+ version: "1.23.0"
storage:
accessModes:
- ReadWriteOnce
diff --git a/docs/examples/ferretdb/reconfigure-tls/ferretdb.yaml b/docs/examples/ferretdb/reconfigure-tls/ferretdb.yaml
index 526c65c1dc..09b7b0f9ef 100644
--- a/docs/examples/ferretdb/reconfigure-tls/ferretdb.yaml
+++ b/docs/examples/ferretdb/reconfigure-tls/ferretdb.yaml
@@ -4,7 +4,7 @@ metadata:
name: ferretdb
namespace: demo
spec:
- version: "1.23.0"
+ version: "1.18.0"
storage:
accessModes:
- ReadWriteOnce
diff --git a/docs/examples/ferretdb/reconfigure-tls/frops-change-issuer.yaml b/docs/examples/ferretdb/reconfigure-tls/frops-change-issuer.yaml
new file mode 100644
index 0000000000..38df852bbb
--- /dev/null
+++ b/docs/examples/ferretdb/reconfigure-tls/frops-change-issuer.yaml
@@ -0,0 +1,14 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: frops-change-issuer
+ namespace: demo
+spec:
+ type: ReconfigureTLS
+ databaseRef:
+ name: ferretdb
+ tls:
+ issuerRef:
+ name: fr-new-issuer
+ kind: Issuer
+ apiGroup: "cert-manager.io"
\ No newline at end of file
diff --git a/docs/examples/ferretdb/reconfigure-tls/frops-remove.yaml b/docs/examples/ferretdb/reconfigure-tls/frops-remove.yaml
new file mode 100644
index 0000000000..b2708bac37
--- /dev/null
+++ b/docs/examples/ferretdb/reconfigure-tls/frops-remove.yaml
@@ -0,0 +1,11 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: frops-remove
+ namespace: demo
+spec:
+ type: ReconfigureTLS
+ databaseRef:
+ name: ferretdb
+ tls:
+ remove: true
\ No newline at end of file
diff --git a/docs/examples/ferretdb/reconfigure-tls/new-issuer.yaml b/docs/examples/ferretdb/reconfigure-tls/new-issuer.yaml
new file mode 100644
index 0000000000..71d30275ee
--- /dev/null
+++ b/docs/examples/ferretdb/reconfigure-tls/new-issuer.yaml
@@ -0,0 +1,8 @@
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: fr-new-issuer
+ namespace: demo
+spec:
+ ca:
+ secretName: ferretdb-new-ca
\ No newline at end of file
diff --git a/docs/examples/ferretdb/restart/ferretdb.yaml b/docs/examples/ferretdb/restart/ferretdb.yaml
new file mode 100644
index 0000000000..b86eab911a
--- /dev/null
+++ b/docs/examples/ferretdb/restart/ferretdb.yaml
@@ -0,0 +1,17 @@
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: ferretdb
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
\ No newline at end of file
diff --git a/docs/examples/ferretdb/restart/ops.yaml b/docs/examples/ferretdb/restart/ops.yaml
new file mode 100644
index 0000000000..a8f7e3a8c4
--- /dev/null
+++ b/docs/examples/ferretdb/restart/ops.yaml
@@ -0,0 +1,11 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: restart-ferretdb
+ namespace: demo
+spec:
+ type: Restart
+ databaseRef:
+ name: ferretdb
+ timeout: 3m
+ apply: Always
\ No newline at end of file
diff --git a/docs/examples/ferretdb/scaling/fr-horizontal.yaml b/docs/examples/ferretdb/scaling/fr-horizontal.yaml
new file mode 100644
index 0000000000..bc542d43b9
--- /dev/null
+++ b/docs/examples/ferretdb/scaling/fr-horizontal.yaml
@@ -0,0 +1,17 @@
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-horizontal
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
\ No newline at end of file
diff --git a/docs/examples/ferretdb/scaling/fr-vertical-ops.yaml b/docs/examples/ferretdb/scaling/fr-vertical-ops.yaml
new file mode 100644
index 0000000000..e5de4d2b39
--- /dev/null
+++ b/docs/examples/ferretdb/scaling/fr-vertical-ops.yaml
@@ -0,0 +1,20 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-scale-vertical
+ namespace: demo
+spec:
+ type: VerticalScaling
+ databaseRef:
+ name: fr-vertical
+ verticalScaling:
+ node:
+ resources:
+ requests:
+ memory: "2Gi"
+ cpu: "1"
+ limits:
+ memory: "2Gi"
+ cpu: "1"
+ timeout: 5m
+ apply: IfReady
\ No newline at end of file
diff --git a/docs/examples/ferretdb/scaling/fr-vertical.yaml b/docs/examples/ferretdb/scaling/fr-vertical.yaml
new file mode 100644
index 0000000000..6809284bfb
--- /dev/null
+++ b/docs/examples/ferretdb/scaling/fr-vertical.yaml
@@ -0,0 +1,17 @@
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-vertical
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
\ No newline at end of file
diff --git a/docs/examples/ferretdb/scaling/frops-hscale-down-ops.yaml b/docs/examples/ferretdb/scaling/frops-hscale-down-ops.yaml
new file mode 100644
index 0000000000..c51199639e
--- /dev/null
+++ b/docs/examples/ferretdb/scaling/frops-hscale-down-ops.yaml
@@ -0,0 +1,11 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-horizontal-scale-down
+ namespace: demo
+spec:
+ type: HorizontalScaling
+ databaseRef:
+ name: fr-horizontal
+ horizontalScaling:
+ node: 2
\ No newline at end of file
diff --git a/docs/examples/ferretdb/scaling/frops-hscale-up-ops.yaml b/docs/examples/ferretdb/scaling/frops-hscale-up-ops.yaml
new file mode 100644
index 0000000000..b8c998e354
--- /dev/null
+++ b/docs/examples/ferretdb/scaling/frops-hscale-up-ops.yaml
@@ -0,0 +1,11 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-horizontal-scale-up
+ namespace: demo
+spec:
+ type: HorizontalScaling
+ databaseRef:
+ name: fr-horizontal
+ horizontalScaling:
+ node: 3
\ No newline at end of file
diff --git a/docs/examples/ferretdb/update-version/fr-update.yaml b/docs/examples/ferretdb/update-version/fr-update.yaml
new file mode 100644
index 0000000000..5759c89db6
--- /dev/null
+++ b/docs/examples/ferretdb/update-version/fr-update.yaml
@@ -0,0 +1,17 @@
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-update
+ namespace: demo
+spec:
+ version: "1.18.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
\ No newline at end of file
diff --git a/docs/examples/ferretdb/update-version/frops-update.yaml b/docs/examples/ferretdb/update-version/frops-update.yaml
new file mode 100644
index 0000000000..9b66daaf14
--- /dev/null
+++ b/docs/examples/ferretdb/update-version/frops-update.yaml
@@ -0,0 +1,11 @@
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-version-update
+ namespace: demo
+spec:
+ type: UpdateVersion
+ databaseRef:
+ name: fr-update
+ updateVersion:
+ targetVersion: 1.23.0
\ No newline at end of file
diff --git a/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md b/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md
index f467188747..a51212656d 100644
--- a/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md
+++ b/docs/guides/ferretdb/autoscaler/compute/compute-autoscale.md
@@ -3,7 +3,7 @@ title: FerretDB Autoscaling
menu:
docs_{{ .version }}:
identifier: fr-auto-scaling-ferretdb
- name: ferretdbCompute
+    name: FerretDB Compute Autoscaling
parent: fr-compute-auto-scaling
weight: 15
menu_name: docs_{{ .version }}
diff --git a/docs/guides/ferretdb/concepts/catalog.md b/docs/guides/ferretdb/concepts/catalog.md
index af2be706af..e136e4d6d3 100644
--- a/docs/guides/ferretdb/concepts/catalog.md
+++ b/docs/guides/ferretdb/concepts/catalog.md
@@ -16,7 +16,7 @@ section_menu_id: guides
## What is FerretDBVersion
-`FerretDBVersion` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration to specify the docker images to be used for [FerretDB](https://ferretdb.net/) server deployed with KubeDB in a Kubernetes native way.
+`FerretDBVersion` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration to specify the docker images to be used for [FerretDB](https://ferretdb.com/) server deployed with KubeDB in a Kubernetes native way.
When you install KubeDB, a `FerretDBVersion` custom resource will be created automatically for every supported FerretDB release versions. You have to specify the name of `FerretDBVersion` crd in `spec.version` field of [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md) crd. Then, KubeDB will use the docker images specified in the `FerretDBVersion` crd to create your expected FerretDB instance.
diff --git a/docs/guides/ferretdb/concepts/ferretdb.md b/docs/guides/ferretdb/concepts/ferretdb.md
index bb1ca86ddb..deb8d2468b 100644
--- a/docs/guides/ferretdb/concepts/ferretdb.md
+++ b/docs/guides/ferretdb/concepts/ferretdb.md
@@ -216,7 +216,7 @@ FerretDB managed by KubeDB can be monitored with builtin-Prometheus and Promethe
### spec.deletionPolicy
-`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Pgpool` CR or which resources KubeDB should keep or delete when you delete `Pgpool` CR. KubeDB provides following four deletion policies:
+`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `FerretDB` CR or which resources KubeDB should keep or delete when you delete `FerretDB` CR. KubeDB provides following four deletion policies:
- DoNotTerminate
- Delete
@@ -224,7 +224,7 @@ FerretDB managed by KubeDB can be monitored with builtin-Prometheus and Promethe
When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`.
-Following table show what KubeDB does when you delete Pgpool CR for different deletion policies,
+Following table show what KubeDB does when you delete FerretDB CR for different deletion policies,
| Behavior | DoNotTerminate | Delete | WipeOut |
|---------------------------| :------------: |:------------:| :------: |
@@ -237,7 +237,7 @@ If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` deletion policy
### spec.podTemplate
-KubeDB allows providing a template for pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for Pgpool.
+KubeDB allows providing a template for pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for FerretDB.
KubeDB accept following fields to set in `spec.podTemplate:`
diff --git a/docs/guides/ferretdb/monitoring/_index.md b/docs/guides/ferretdb/monitoring/_index.md
index cbafc7fe86..40795496c4 100644
--- a/docs/guides/ferretdb/monitoring/_index.md
+++ b/docs/guides/ferretdb/monitoring/_index.md
@@ -4,7 +4,7 @@ menu:
docs_{{ .version }}:
identifier: fr-monitoring-ferretdb
name: Monitoring
- parent: fr-pgpool-guides
+ parent: fr-ferretdb-guides
weight: 50
menu_name: docs_{{ .version }}
---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/monitoring/overview.md b/docs/guides/ferretdb/monitoring/overview.md
index 6b4c0954d0..bcbb7441d7 100644
--- a/docs/guides/ferretdb/monitoring/overview.md
+++ b/docs/guides/ferretdb/monitoring/overview.md
@@ -53,11 +53,16 @@ metadata:
name: sample-ferretdb
namespace: databases
spec:
- version: "4.5.0"
+ version: "1.23.0"
deletionPolicy: WipeOut
- postgresRef:
- name: ha-postgres
- namespace: demo
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
monitor:
agent: prometheus.io/operator
prometheus:
diff --git a/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md b/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md
index c1e04d313e..203f4a8d10 100644
--- a/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md
+++ b/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md
@@ -49,7 +49,7 @@ metadata:
name: builtin-prom-fr
namespace: demo
spec:
- version: "1.18.0"
+ version: "1.23.0"
storage:
accessModes:
- ReadWriteOnce
@@ -336,7 +336,7 @@ Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:909
-Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `FerretDB` database `builtin-prom-fr` through stats service `builtin-prom-fr-stats`.
+Check the labels. These labels confirm that the metrics are coming from `FerretDB` database `builtin-prom-fr` through stats service `builtin-prom-fr-stats`.
Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics.
diff --git a/docs/guides/ferretdb/monitoring/using-prometheus-operator.md b/docs/guides/ferretdb/monitoring/using-prometheus-operator.md
index 7007a211d0..861373823a 100644
--- a/docs/guides/ferretdb/monitoring/using-prometheus-operator.md
+++ b/docs/guides/ferretdb/monitoring/using-prometheus-operator.md
@@ -350,8 +350,7 @@ Check the `endpoint` and `service` labels marked by the red rectangles. It verif
To clean up the Kubernetes resources created by this tutorial, run following commands
```bash
-kubectl delete -n demo pp/coreos-prom-pp
-kubectl delete -n demo pg/ha-postgres
+kubectl delete -n demo fr/coreos-prom-fr
kubectl delete ns demo
```
diff --git a/docs/guides/ferretdb/reconfigure-tls/reconfigure-tls.md b/docs/guides/ferretdb/reconfigure-tls/reconfigure-tls.md
index ac83465570..2217344798 100644
--- a/docs/guides/ferretdb/reconfigure-tls/reconfigure-tls.md
+++ b/docs/guides/ferretdb/reconfigure-tls/reconfigure-tls.md
@@ -45,10 +45,10 @@ In this section, we are going to deploy a FerretDB without TLS. In the next few
apiVersion: kubedb.com/v1alpha2
kind: FerretDB
metadata:
- name: ferretdb
+  name: ferretdb
namespace: demo
spec:
- version: "1.23.0"
+ version: "1.18.0"
storage:
accessModes:
- ReadWriteOnce
@@ -72,7 +72,7 @@ Now, wait until `ferretdb` has status `Ready`. i.e,
```bash
$ kubectl get fr -n demo
NAME NAMESPACE VERSION STATUS AGE
-ferretdb demo 1.23.0 Ready 75s
+ferretdb demo 1.18.0 Ready 75s
$ kubectl dba describe ferretdb ferretdb -n demo
Name: ferretdb
@@ -138,7 +138,7 @@ Spec:
Requests:
Storage: 500Mi
Storage Type: Durable
- Version: 1.23.0
+ Version: 1.18.0
Status:
Conditions:
Last Transition Time: 2024-10-17T11:04:08Z
@@ -456,14 +456,7 @@ So, here we have connected using the client certificate and the connection is tl
## Rotate Certificate
-Now we are going to rotate the certificate of this database. First let's check the current expiration date of the certificate.
-
-```bash
-$ openssl x509 -in ./ca.crt -inform PEM -enddate -nameopt RFC2253 -noout
-notAfter=Oct 14 10:20:07 2025 GMT
-```
-
-So, the certificate will expire on this time `Oct 14 10:20:07 2025 GMT`.
+Now we are going to rotate the certificate of this database. First, we can note down the current expiration date of the certificate by exec-ing into the `ferretdb-0` pod. Certs are located in the `/etc/certs/server/` path.
### Create FerretDBOpsRequest
@@ -639,14 +632,6 @@ Events:
Normal Successful 16s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/ferretdb for FerretDBOpsRequest: frops-rotate
```
-Now, let's check the expiration date of the certificate.
-
-```bash
-$ kubectl exec -it -n demo ferretdb-0 -- bash master ⬆ ⬇ ✱ ◼
-ferretdb-0:/$ openssl x509 -in /opt/ferretdb-II/tls/ca.pem -inform PEM -enddate -nameopt RFC2253 -noout
-notAfter=Oct 27 07:10:20 2024 GMT
-```
-
As we can see from the above output, the certificate has been rotated successfully.
## Change Issuer/ClusterIssuer
@@ -702,7 +687,7 @@ In order to use the new issuer to issue new certificates, we have to create a `F
apiVersion: ops.kubedb.com/v1alpha1
kind: FerretDBOpsRequest
metadata:
- name: ppops-change-issuer
+ name: frops-change-issuer
namespace: demo
spec:
type: ReconfigureTLS
@@ -724,8 +709,8 @@ Here,
Let's create the `FerretDBOpsRequest` CR we have shown above,
```bash
-$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/reconfigure-tls/ppops-change-issuer.yaml
-ferretdbopsrequest.ops.kubedb.com/ppops-change-issuer created
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/reconfigure-tls/frops-change-issuer.yaml
+ferretdbopsrequest.ops.kubedb.com/frops-change-issuer created
```
#### Verify Issuer is changed successfully
@@ -736,24 +721,24 @@ Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following comma
$ watch kubectl get ferretdbopsrequest -n demo
Every 2.0s: kubectl get ferretdbopsrequest -n demo
NAME TYPE STATUS AGE
-ppops-change-issuer ReconfigureTLS Successful 87s
+frops-change-issuer ReconfigureTLS Successful 87s
```
We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed.
```bash
-$ kubectl describe ferretdbopsrequest -n demo ppops-change-issuer
-Name: ppops-change-issuer
+$ kubectl describe ferretdbopsrequest -n demo frops-change-issuer
+Name: frops-change-issuer
Namespace: demo
Labels:
Annotations:
API Version: ops.kubedb.com/v1alpha1
Kind: FerretDBOpsRequest
Metadata:
- Creation Timestamp: 2024-07-29T07:37:09Z
+ Creation Timestamp: 2024-10-18T10:14:38Z
Generation: 1
- Resource Version: 12367
- UID: f48452ed-7264-4e99-80f1-58d7e826d9a9
+ Resource Version: 423126
+ UID: 1bf730e8-603e-4f30-b9ab-5a4e75d3a4d4
Spec:
Apply: IfReady
Database Ref:
@@ -766,74 +751,83 @@ Spec:
Type: ReconfigureTLS
Status:
Conditions:
- Last Transition Time: 2024-07-29T07:37:09Z
- Message: FerretDB ops-request has started to reconfigure tls for RabbitMQ nodes
+ Last Transition Time: 2024-10-18T10:14:38Z
+ Message: FerretDB ops-request has started to reconfigure tls for FerretDB nodes
Observed Generation: 1
Reason: ReconfigureTLS
Status: True
Type: ReconfigureTLS
- Last Transition Time: 2024-07-29T07:37:12Z
+ Last Transition Time: 2024-10-18T10:14:41Z
Message: Successfully paused database
Observed Generation: 1
Reason: DatabasePauseSucceeded
Status: True
Type: DatabasePauseSucceeded
- Last Transition Time: 2024-07-29T07:37:24Z
- Message: Successfully synced all certificates
- Observed Generation: 1
- Reason: CertificateSynced
- Status: True
- Type: CertificateSynced
- Last Transition Time: 2024-07-29T07:37:18Z
+ Last Transition Time: 2024-10-18T10:14:46Z
Message: get certificate; ConditionStatus:True
Observed Generation: 1
Status: True
Type: GetCertificate
- Last Transition Time: 2024-07-29T07:37:18Z
- Message: check ready condition; ConditionStatus:True
+ Last Transition Time: 2024-10-18T10:14:46Z
+ Message: ready condition; ConditionStatus:True
Observed Generation: 1
Status: True
- Type: CheckReadyCondition
- Last Transition Time: 2024-07-29T07:37:18Z
- Message: check issuing condition; ConditionStatus:True
+ Type: ReadyCondition
+ Last Transition Time: 2024-10-18T10:14:46Z
+ Message: issuing condition; ConditionStatus:True
Observed Generation: 1
Status: True
- Type: CheckIssuingCondition
- Last Transition Time: 2024-07-29T07:37:30Z
- Message: successfully reconciled the FerretDB with TLS
+ Type: IssuingCondition
+ Last Transition Time: 2024-10-18T10:14:46Z
+ Message: Successfully synced all certificates
Observed Generation: 1
- Reason: UpdatePetSets
+ Reason: CertificateSynced
Status: True
- Type: UpdatePetSets
- Last Transition Time: 2024-07-29T07:38:15Z
- Message: Successfully Restarted FerretDB pods
+ Type: CertificateSynced
+ Last Transition Time: 2024-10-18T10:14:51Z
+ Message: successfully reconciled the FerretDB with tls configuration
Observed Generation: 1
- Reason: RestartPods
+ Reason: UpdatePetSets
Status: True
- Type: RestartPods
- Last Transition Time: 2024-07-29T07:37:35Z
+ Type: UpdatePetSets
+ Last Transition Time: 2024-10-18T10:14:56Z
Message: get pod; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: GetPod--ferretdb-0
- Last Transition Time: 2024-07-29T07:37:35Z
+ Last Transition Time: 2024-10-18T10:14:56Z
Message: evict pod; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: EvictPod--ferretdb-0
- Last Transition Time: 2024-07-29T07:38:10Z
+ Last Transition Time: 2024-10-18T10:15:01Z
Message: check pod running; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: CheckPodRunning--ferretdb-0
- Last Transition Time: 2024-07-29T07:38:15Z
- Message: Successfully updated FerretDB
+ Last Transition Time: 2024-10-18T10:15:06Z
+ Message: get pod; ConditionStatus:True; PodName:ferretdb-1
+ Observed Generation: 1
+ Status: True
+ Type: GetPod--ferretdb-1
+ Last Transition Time: 2024-10-18T10:15:06Z
+ Message: evict pod; ConditionStatus:True; PodName:ferretdb-1
Observed Generation: 1
- Reason: UpdateDatabase
Status: True
- Type: UpdateDatabase
- Last Transition Time: 2024-07-29T07:38:16Z
- Message: Successfully updated FerretDB TLS
+ Type: EvictPod--ferretdb-1
+ Last Transition Time: 2024-10-18T10:15:11Z
+ Message: check pod running; ConditionStatus:True; PodName:ferretdb-1
+ Observed Generation: 1
+ Status: True
+ Type: CheckPodRunning--ferretdb-1
+ Last Transition Time: 2024-10-18T10:15:16Z
+ Message: Successfully restarted all nodes
+ Observed Generation: 1
+ Reason: RestartNodes
+ Status: True
+ Type: RestartNodes
+ Last Transition Time: 2024-10-18T10:15:16Z
+ Message: Successfully completed the ReconfigureTLS for FerretDB
Observed Generation: 1
Reason: Successful
Status: True
@@ -841,50 +835,33 @@ Status:
Observed Generation: 1
Phase: Successful
Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal Starting 3m39s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ppops-change-issuer
- Normal Starting 3m39s KubeDB Ops-manager Operator Pausing FerretDB databse: demo/ferretdb
- Normal Successful 3m39s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/ferretdb for FerretDBOpsRequest: ppops-change-issuer
- Warning get certificate; ConditionStatus:True 3m30s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Warning get certificate; ConditionStatus:True 3m30s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Warning get certificate; ConditionStatus:True 3m30s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m30s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Normal CertificateSynced 3m30s KubeDB Ops-manager Operator Successfully synced all certificates
- Warning get certificate; ConditionStatus:True 3m25s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m25s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m24s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Warning get certificate; ConditionStatus:True 3m24s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m24s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m24s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Warning get certificate; ConditionStatus:True 3m24s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
- Warning check ready condition; ConditionStatus:True 3m24s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True
- Warning check issuing condition; ConditionStatus:True 3m24s KubeDB Ops-manager Operator check issuing condition; ConditionStatus:True
- Normal CertificateSynced 3m24s KubeDB Ops-manager Operator Successfully synced all certificates
- Normal UpdatePetSets 3m18s KubeDB Ops-manager Operator successfully reconciled the FerretDB with TLS
- Warning get pod; ConditionStatus:True; PodName:ferretdb-0 3m13s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-0
- Warning evict pod; ConditionStatus:True; PodName:ferretdb-0 3m13s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-0
- Warning check pod running; ConditionStatus:False; PodName:ferretdb-0 3m8s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:ferretdb-0
- Warning check pod running; ConditionStatus:True; PodName:ferretdb-0 2m38s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-0
- Normal RestartPods 2m33s KubeDB Ops-manager Operator Successfully Restarted FerretDB pods
- Normal Starting 2m32s KubeDB Ops-manager Operator Resuming FerretDB database: demo/ferretdb
- Normal Successful 2m32s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/ferretdb for FerretDBOpsRequest: ppops-change-issuer
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 88s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/frops-change-issuer
+ Normal Starting 88s KubeDB Ops-manager Operator Pausing FerretDB database: demo/ferretdb
+ Normal Successful 88s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/ferretdb for FerretDBOpsRequest: frops-change-issuer
+ Warning get certificate; ConditionStatus:True 80s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
+ Warning ready condition; ConditionStatus:True 80s KubeDB Ops-manager Operator ready condition; ConditionStatus:True
+ Warning issuing condition; ConditionStatus:True 80s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True
+ Warning get certificate; ConditionStatus:True 80s KubeDB Ops-manager Operator get certificate; ConditionStatus:True
+ Warning ready condition; ConditionStatus:True 80s KubeDB Ops-manager Operator ready condition; ConditionStatus:True
+ Warning issuing condition; ConditionStatus:True 80s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True
+ Normal CertificateSynced 80s KubeDB Ops-manager Operator Successfully synced all certificates
+ Normal UpdatePetSets 75s KubeDB Ops-manager Operator successfully reconciled the FerretDB with tls configuration
+ Warning get pod; ConditionStatus:True; PodName:ferretdb-0 70s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-0
+ Warning evict pod; ConditionStatus:True; PodName:ferretdb-0 70s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-0
+ Warning check pod running; ConditionStatus:True; PodName:ferretdb-0 65s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-0
+ Warning get pod; ConditionStatus:True; PodName:ferretdb-1 60s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-1
+ Warning evict pod; ConditionStatus:True; PodName:ferretdb-1 60s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-1
+ Warning check pod running; ConditionStatus:True; PodName:ferretdb-1 55s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-1
+ Normal RestartNodes 50s KubeDB Ops-manager Operator Successfully restarted all nodes
+ Normal Starting 50s KubeDB Ops-manager Operator Resuming FerretDB database: demo/ferretdb
+ Normal Successful 50s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/ferretdb for FerretDBOpsRequest: frops-change-issuer
```
-Now, Let's exec ferretdb and find out the ca subject to see if it matches the one we have provided.
-
-```bash
-$ kubectl exec -it -n demo ferretdb-0 -- bash
-ferretdb-0:/$ openssl x509 -in /opt/ferretdb-II/tls/ca.pem -inform PEM -subject -nameopt RFC2253 -noout
-subject=O=kubedb-updated,CN=ca-updated
-```
+Now, if we exec into a ferretdb pod and inspect the ca subject in the `/etc/certs/server` location, we can see that the CN and O are updated according to our new ca.crt.
-We can see from the above output that, the subject name matches the subject name of the new ca certificate that we have created. So, the issuer is changed successfully.
+We can see that the subject name of this ca.crt matches the subject name of the new ca certificate that we have created. So, the issuer is changed successfully.
## Remove TLS from the ferretdb
@@ -898,7 +875,7 @@ Below is the YAML of the `FerretDBOpsRequest` CRO that we are going to create,
apiVersion: ops.kubedb.com/v1alpha1
kind: FerretDBOpsRequest
metadata:
- name: ppops-remove
+ name: frops-remove
namespace: demo
spec:
type: ReconfigureTLS
@@ -917,8 +894,8 @@ Here,
Let's create the `FerretDBOpsRequest` CR we have shown above,
```bash
-$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/reconfigure-tls/ppops-remove.yaml
-ferretdbopsrequest.ops.kubedb.com/ppops-remove created
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/reconfigure-tls/frops-remove.yaml
+ferretdbopsrequest.ops.kubedb.com/frops-remove created
```
#### Verify TLS Removed Successfully
@@ -929,24 +906,24 @@ Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following comma
$ wacth kubectl get ferretdbopsrequest -n demo
Every 2.0s: kubectl get ferretdbopsrequest -n demo
NAME TYPE STATUS AGE
-ppops-remove ReconfigureTLS Successful 65s
+frops-remove ReconfigureTLS Successful 65s
```
We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed.
```bash
-$ kubectl describe ferretdbopsrequest -n demo ppops-remove
-Name: ppops-remove
+$ kubectl describe ferretdbopsrequest -n demo frops-remove
+Name: frops-remove
Namespace: demo
Labels:
Annotations:
API Version: ops.kubedb.com/v1alpha1
Kind: FerretDBOpsRequest
Metadata:
- Creation Timestamp: 2024-07-29T08:38:35Z
+ Creation Timestamp: 2024-10-18T11:11:55Z
Generation: 1
- Resource Version: 16378
- UID: f848e04f-0fd1-48ce-813d-67dbdc3e4a55
+ Resource Version: 428244
+ UID: 28a6ba72-0a2d-47f1-97b0-1e9609845acc
Spec:
Apply: IfReady
Database Ref:
@@ -956,53 +933,62 @@ Spec:
Type: ReconfigureTLS
Status:
Conditions:
- Last Transition Time: 2024-07-29T08:38:37Z
- Message: FerretDB ops-request has started to reconfigure tls for RabbitMQ nodes
+ Last Transition Time: 2024-10-18T11:11:55Z
+ Message: FerretDB ops-request has started to reconfigure tls for FerretDB nodes
Observed Generation: 1
Reason: ReconfigureTLS
Status: True
Type: ReconfigureTLS
- Last Transition Time: 2024-07-29T08:38:41Z
+ Last Transition Time: 2024-10-18T11:11:58Z
Message: Successfully paused database
Observed Generation: 1
Reason: DatabasePauseSucceeded
Status: True
Type: DatabasePauseSucceeded
- Last Transition Time: 2024-07-29T08:38:47Z
- Message: successfully reconciled the FerretDB with TLS
+ Last Transition Time: 2024-10-18T11:12:04Z
+ Message: successfully reconciled the FerretDB with tls configuration
Observed Generation: 1
Reason: UpdatePetSets
Status: True
Type: UpdatePetSets
- Last Transition Time: 2024-07-29T08:39:32Z
- Message: Successfully Restarted FerretDB pods
- Observed Generation: 1
- Reason: RestartPods
- Status: True
- Type: RestartPods
- Last Transition Time: 2024-07-29T08:38:52Z
+ Last Transition Time: 2024-10-18T11:12:09Z
Message: get pod; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: GetPod--ferretdb-0
- Last Transition Time: 2024-07-29T08:38:52Z
+ Last Transition Time: 2024-10-18T11:12:09Z
Message: evict pod; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: EvictPod--ferretdb-0
- Last Transition Time: 2024-07-29T08:39:27Z
+ Last Transition Time: 2024-10-18T11:12:14Z
Message: check pod running; ConditionStatus:True; PodName:ferretdb-0
Observed Generation: 1
Status: True
Type: CheckPodRunning--ferretdb-0
- Last Transition Time: 2024-07-29T08:39:32Z
- Message: Successfully updated FerretDB
+ Last Transition Time: 2024-10-18T11:12:19Z
+ Message: get pod; ConditionStatus:True; PodName:ferretdb-1
+ Observed Generation: 1
+ Status: True
+ Type: GetPod--ferretdb-1
+ Last Transition Time: 2024-10-18T11:12:19Z
+ Message: evict pod; ConditionStatus:True; PodName:ferretdb-1
+ Observed Generation: 1
+ Status: True
+ Type: EvictPod--ferretdb-1
+ Last Transition Time: 2024-10-18T11:12:24Z
+ Message: check pod running; ConditionStatus:True; PodName:ferretdb-1
+ Observed Generation: 1
+ Status: True
+ Type: CheckPodRunning--ferretdb-1
+ Last Transition Time: 2024-10-18T11:12:29Z
+ Message: Successfully restarted all nodes
Observed Generation: 1
- Reason: UpdateDatabase
+ Reason: RestartNodes
Status: True
- Type: UpdateDatabase
- Last Transition Time: 2024-07-29T08:39:33Z
- Message: Successfully updated FerretDB TLS
+ Type: RestartNodes
+ Last Transition Time: 2024-10-18T11:12:29Z
+ Message: Successfully completed the ReconfigureTLS for FerretDB
Observed Generation: 1
Reason: Successful
Status: True
@@ -1010,64 +996,61 @@ Status:
Observed Generation: 1
Phase: Successful
Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal Starting 84s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ppops-remove
- Normal Starting 84s KubeDB Ops-manager Operator Pausing FerretDB databse: demo/ferretdb
- Normal Successful 83s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/ferretdb for FerretDBOpsRequest: ppops-remove
- Normal UpdatePetSets 74s KubeDB Ops-manager Operator successfully reconciled the FerretDB with TLS
- Warning get pod; ConditionStatus:True; PodName:ferretdb-0 69s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-0
- Warning evict pod; ConditionStatus:True; PodName:ferretdb-0 69s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-0
- Warning check pod running; ConditionStatus:False; PodName:ferretdb-0 64s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:ferretdb-0
- Warning check pod running; ConditionStatus:True; PodName:ferretdb-0 34s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-0
- Normal RestartPods 29s KubeDB Ops-manager Operator Successfully Restarted FerretDB pods
- Normal Starting 29s KubeDB Ops-manager Operator Resuming FerretDB database: demo/ferretdb
- Normal Successful 28s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/ferretdb for FerretDBOpsRequest: ppops-remove
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 87s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/frops-remove
+ Normal Starting 87s KubeDB Ops-manager Operator Pausing FerretDB database: demo/ferretdb
+ Normal Successful 87s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/ferretdb for FerretDBOpsRequest: frops-remove
+ Normal UpdatePetSets 78s KubeDB Ops-manager Operator successfully reconciled the FerretDB with tls configuration
+ Warning get pod; ConditionStatus:True; PodName:ferretdb-0 73s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-0
+ Warning evict pod; ConditionStatus:True; PodName:ferretdb-0 73s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-0
+ Warning check pod running; ConditionStatus:True; PodName:ferretdb-0 68s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-0
+ Warning get pod; ConditionStatus:True; PodName:ferretdb-1 63s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ferretdb-1
+ Warning evict pod; ConditionStatus:True; PodName:ferretdb-1 63s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ferretdb-1
+ Warning check pod running; ConditionStatus:True; PodName:ferretdb-1 58s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ferretdb-1
+ Normal RestartNodes 53s KubeDB Ops-manager Operator Successfully restarted all nodes
+ Normal Starting 53s KubeDB Ops-manager Operator Resuming FerretDB database: demo/ferretdb
+ Normal Successful 53s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/ferretdb for FerretDBOpsRequest: frops-remove
```
-Now, Let's exec into ferretdb and find out that TLS is disabled or not.
+Now, let's try to connect with ferretdb without TLS certs.
```bash
-$ kubectl exec -it -n demo ferretdb-0 -- bash
-ferretdb-0:/$ cat opt/ferretdb-II/etc/ferretdb.conf
-backend_hostname0 = 'ha-postgres.demo.svc'
-backend_port0 = 5432
-backend_weight0 = 1
-backend_flag0 = 'ALWAYS_PRIMARY|DISALLOW_TO_FAILOVER'
-backend_hostname1 = 'ha-postgres-standby.demo.svc'
-backend_port1 = 5432
-backend_weight1 = 1
-backend_flag1 = 'DISALLOW_TO_FAILOVER'
-enable_pool_hba = on
-listen_addresses = *
-port = 9999
-socket_dir = '/var/run/ferretdb'
-pcp_listen_addresses = *
-pcp_port = 9595
-pcp_socket_dir = '/var/run/ferretdb'
-log_per_node_statement = on
-sr_check_period = 0
-health_check_period = 0
-backend_clustering_mode = 'streaming_replication'
-num_init_children = 5
-max_pool = 15
-child_life_time = 300
-child_max_connections = 0
-connection_life_time = 0
-client_idle_limit = 0
-connection_cache = on
-load_balance_mode = on
-ssl = 'off'
-failover_on_backend_error = 'off'
-log_min_messages = 'warning'
-statement_level_load_balance = 'off'
-memory_cache_enabled = 'off'
-memqcache_oiddir = '/tmp/oiddir/'
-allow_clear_text_frontend_auth = 'false'
-failover_on_backend_error = 'off'
+$ kubectl get secrets -n demo ferretdb-auth -o jsonpath='{.data.\\username}' | base64 -d
+postgres
+$ kubectl get secrets -n demo ferretdb-auth -o jsonpath='{.data.\\password}' | base64 -d
+l*jGp8u*El8WRSDJ
+
+$ kubectl port-forward svc/ferretdb -n demo 27017
+Forwarding from 127.0.0.1:27017 -> 27017
+Forwarding from [::1]:27017 -> 27017
+Handling connection for 27017
+Handling connection for 27017
+```
+
+Now in another terminal
+
+```bash
+$ mongosh 'mongodb://postgres:l*jGp8u*El8WRSDJ@localhost:27017/ferretdb?authMechanism=PLAIN'
+Current Mongosh Log ID: 65efeea2a3347fff66d04c70
+Connecting to: mongodb://@localhost:27017/ferretdb?authMechanism=PLAIN&directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5
+Using MongoDB: 7.0.42
+Using Mongosh: 2.1.5
+
+For mongosh info see: https://docs.mongodb.com/mongodb-shell/
+
+------
+ The server generated these startup warnings when booting
+ 2024-03-12T05:56:50.979Z: Powered by FerretDB v1.18.0 and PostgreSQL 13.13 on x86_64-pc-linux-musl, compiled by gcc.
+ 2024-03-12T05:56:50.979Z: Please star us on GitHub: https://github.com/FerretDB/FerretDB.
+ 2024-03-12T05:56:50.979Z: The telemetry state is undecided.
+ 2024-03-12T05:56:50.979Z: Read more about FerretDB telemetry and how to opt out at https://beacon.ferretdb.io.
+------
+
+ferretdb>
```
-We can see from the above output that `ssl='off'` so we can verify that TLS is disabled successfully for this ferretdb.
+We can see that we can now connect without providing TLS certs. So TLS connection is successfully disabled.
## Cleaning up
@@ -1076,8 +1059,7 @@ To clean up the Kubernetes resources created by this tutorial, run:
```bash
kubectl delete ferretdb -n demo ferretdb
kubectl delete issuer -n demo ferretdb-issuer fr-new-issuer
-kubectl delete ferretdbopsrequest -n demo ppops-add-tls ppops-remove ppops-rotate ppops-change-issuer
-kubectl delete pg -n demo ha-postgres
+kubectl delete ferretdbopsrequest -n demo frops-add-tls frops-remove frops-rotate frops-change-issuer
kubectl delete ns demo
```
diff --git a/docs/guides/ferretdb/restart/_index.md b/docs/guides/ferretdb/restart/_index.md
new file mode 100644
index 0000000000..f63cdeaa1f
--- /dev/null
+++ b/docs/guides/ferretdb/restart/_index.md
@@ -0,0 +1,10 @@
+---
+title: Restart FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-restart
+ name: Restart
+ parent: fr-ferretdb-guides
+ weight: 46
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/ferretdb/restart/restart.md b/docs/guides/ferretdb/restart/restart.md
new file mode 100644
index 0000000000..1441676233
--- /dev/null
+++ b/docs/guides/ferretdb/restart/restart.md
@@ -0,0 +1,174 @@
+---
+title: Restart FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-restart-details
+ name: Restart FerretDB
+ parent: fr-restart
+ weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Restart FerretDB
+
+KubeDB supports restarting the FerretDB via a FerretDBOpsRequest. Restarting is useful if some pods get stuck in some phase, or they are not working correctly. This tutorial will show you how to use that.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial.
+
+```bash
+ $ kubectl create ns demo
+ namespace/demo created
+ ```
+
+> Note: YAML files used in this tutorial are stored in [docs/examples/ferretdb](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/ferretdb) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Deploy FerretDB
+
+In this section, we are going to deploy a FerretDB using KubeDB.
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: ferretdb
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
+```
+
+Let's create the `FerretDB` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/restart/ferretdb.yaml
+ferretdb.kubedb.com/ferretdb created
+```
+
+## Apply Restart opsRequest
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: restart-ferretdb
+ namespace: demo
+spec:
+ type: Restart
+ databaseRef:
+ name: ferretdb
+ timeout: 3m
+ apply: Always
+```
+
+- `spec.type` specifies the Type of the ops Request
+- `spec.databaseRef` holds the name of the FerretDB. The ferretdb should be available in the same namespace as the opsRequest
+- The meaning of `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/ferretdb/concepts/opsrequest.md#spectimeout)
+
+Let's create the `FerretDBOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/restart/ops.yaml
+ferretdbopsrequest.ops.kubedb.com/restart-ferretdb created
+```
+
+Now the Ops-manager operator will restart the pods one by one.
+
+```shell
+$ kubectl get frops -n demo
+NAME TYPE STATUS AGE
+restart-ferretdb Restart Successful 2m15s
+
+$ kubectl get frops -n demo -oyaml restart-ferretdb
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ annotations:
+ kubectl.kubernetes.io/last-applied-configuration: |
+ {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"FerretDBOpsRequest","metadata":{"annotations":{},"name":"restart-ferretdb","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"ferretdb"},"timeout":"3m","type":"Restart"}}
+ creationTimestamp: "2024-10-21T12:38:38Z"
+ generation: 1
+ name: restart-ferretdb
+ namespace: demo
+ resourceVersion: "367859"
+ uid: 0ca77cab-d354-43a4-ba85-c31f1f6e685d
+spec:
+ apply: Always
+ databaseRef:
+ name: ferretdb
+ timeout: 3m
+ type: Restart
+status:
+ conditions:
+ - lastTransitionTime: "2024-10-21T12:38:38Z"
+ message: FerretDBOpsRequest has started to restart FerretDB nodes
+ observedGeneration: 1
+ reason: Restart
+ status: "True"
+ type: Restart
+ - lastTransitionTime: "2024-10-21T12:38:46Z"
+ message: get pod; ConditionStatus:True; PodName:ferretdb-0
+ observedGeneration: 1
+ status: "True"
+ type: GetPod--ferretdb-0
+ - lastTransitionTime: "2024-10-21T12:38:46Z"
+ message: evict pod; ConditionStatus:True; PodName:ferretdb-0
+ observedGeneration: 1
+ status: "True"
+ type: EvictPod--ferretdb-0
+ - lastTransitionTime: "2024-10-21T12:38:51Z"
+ message: check pod running; ConditionStatus:True; PodName:ferretdb-0
+ observedGeneration: 1
+ status: "True"
+ type: CheckPodRunning--ferretdb-0
+ - lastTransitionTime: "2024-10-21T12:38:56Z"
+ message: Successfully restarted FerretDB nodes
+ observedGeneration: 1
+ reason: RestartNodes
+ status: "True"
+ type: RestartNodes
+ - lastTransitionTime: "2024-10-21T12:38:56Z"
+ message: Controller has successfully restart the FerretDB replicas
+ observedGeneration: 1
+ reason: Successful
+ status: "True"
+ type: Successful
+ observedGeneration: 1
+ phase: Successful
+```
+
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete ferretdbopsrequest -n demo restart-ferretdb
+kubectl delete ferretdb -n demo ferretdb
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [FerretDB object](/docs/guides/ferretdb/concepts/ferretdb.md).
+- Monitor your FerretDB database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/ferretdb/monitoring/using-prometheus-operator.md).
+- Monitor your FerretDB database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/ferretdb/monitoring/using-builtin-prometheus.md).
+- Detail concepts of [FerretDBOpsRequest object](/docs/guides/ferretdb/concepts/opsrequest.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/ferretdb/scaling/_index.md b/docs/guides/ferretdb/scaling/_index.md
new file mode 100644
index 0000000000..871b7e0178
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Scaling FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-scaling
+ name: Scaling
+ parent: fr-ferretdb-guides
+ weight: 43
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/horizontal-scaling/_index.md b/docs/guides/ferretdb/scaling/horizontal-scaling/_index.md
new file mode 100644
index 0000000000..123b627e9a
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/horizontal-scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Horizontal Scaling
+menu:
+ docs_{{ .version }}:
+ identifier: fr-horizontal-scaling
+ name: Horizontal Scaling
+ parent: fr-scaling
+ weight: 10
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/horizontal-scaling/horizontal-ops.md b/docs/guides/ferretdb/scaling/horizontal-scaling/horizontal-ops.md
new file mode 100644
index 0000000000..4929ffbfac
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/horizontal-scaling/horizontal-ops.md
@@ -0,0 +1,432 @@
+---
+title: Horizontal Scaling FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-horizontal-scaling-ops
+ name: HorizontalScaling OpsRequest
+ parent: fr-horizontal-scaling
+ weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Horizontal Scale FerretDB
+
+This guide will show you how to use `KubeDB` Ops-manager operator to scale the replicaset of a FerretDB.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+ - [Horizontal Scaling Overview](/docs/guides/ferretdb/scaling/horizontal-scaling/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/ferretdb](/docs/examples/ferretdb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Apply Horizontal Scaling on ferretdb
+
+Here, we are going to deploy a `FerretDB` using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it.
+
+### Prepare FerretDB
+
+Now, we are going to deploy a `FerretDB` with version `1.23.0`.
+
+### Deploy FerretDB
+
+In this section, we are going to deploy a FerretDB. Then, in the next section we will scale the ferretdb using `FerretDBOpsRequest` CRD. Below is the YAML of the `FerretDB` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-horizontal
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
+```
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/scaling/fr-horizontal.yaml
+ferretdb.kubedb.com/fr-horizontal created
+```
+
+Now, wait until `fr-horizontal` has status `Ready`. i.e,
+
+```bash
+$ kubectl get fr -n demo
+NAME TYPE VERSION STATUS AGE
+fr-horizontal kubedb.com/v1alpha2 1.23.0 Ready 2m
+```
+
+Let's check the number of replicas this ferretdb has from the FerretDB object, number of pods the petset have,
+
+```bash
+$ kubectl get ferretdb -n demo fr-horizontal -o json | jq '.spec.replicas'
+1
+
+$ kubectl get petset -n demo fr-horizontal -o json | jq '.spec.replicas'
+1
+```
+
+We can see from both commands that the ferretdb has 1 replica.
+
+We are now ready to apply the `FerretDBOpsRequest` CR to scale this ferretdb.
+
+## Scale Up Replicas
+
+Here, we are going to scale up the replicas of the ferretdb to meet the desired number of replicas after scaling.
+
+#### Create FerretDBOpsRequest
+
+In order to scale up the replicas of the ferretdb, we have to create a `FerretDBOpsRequest` CR with our desired replicas. Below is the YAML of the `FerretDBOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-horizontal-scale-up
+ namespace: demo
+spec:
+ type: HorizontalScaling
+ databaseRef:
+ name: fr-horizontal
+ horizontalScaling:
+ node: 3
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `fr-horizontal` ferretdb.
+- `spec.type` specifies that we are performing `HorizontalScaling` on our ferretdb.
+- `spec.horizontalScaling.node` specifies the desired number of replicas after scaling.
+
+Let's create the `FerretDBOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/scaling/horizontal-scaling/frops-hscale-up-ops.yaml
+ferretdbopsrequest.ops.kubedb.com/ferretdb-horizontal-scale-up created
+```
+
+#### Verify replicas scaled up successfully
+
+If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `FerretDB` object and related `PetSet`.
+
+Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following command to watch `FerretDBOpsRequest` CR,
+
+```bash
+$ watch kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME TYPE STATUS AGE
+ferretdb-horizontal-scale-up HorizontalScaling Successful 102s
+```
+
+We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed to scale the ferretdb.
+
+```bash
+$ kubectl describe ferretdbopsrequest -n demo ferretdb-horizontal-scale-up
+Name: ferretdb-horizontal-scale-up
+Namespace: demo
+Labels:
+Annotations:
+API Version: ops.kubedb.com/v1alpha1
+Kind: FerretDBOpsRequest
+Metadata:
+ Creation Timestamp: 2024-10-21T10:03:39Z
+ Generation: 1
+ Resource Version: 353610
+ UID: ce6c9e66-6196-4746-851a-ea49084eda05
+Spec:
+ Apply: IfReady
+ Database Ref:
+ Name: fr-horizontal
+ Horizontal Scaling:
+ Node: 3
+ Type: HorizontalScaling
+Status:
+ Conditions:
+ Last Transition Time: 2024-10-21T10:04:30Z
+ Message: FerretDB ops-request has started to horizontally scaling the nodes
+ Observed Generation: 1
+ Reason: HorizontalScaling
+ Status: True
+ Type: HorizontalScaling
+ Last Transition Time: 2024-10-21T10:04:33Z
+ Message: Successfully paused database
+ Observed Generation: 1
+ Reason: DatabasePauseSucceeded
+ Status: True
+ Type: DatabasePauseSucceeded
+ Last Transition Time: 2024-10-21T10:04:58Z
+ Message: Successfully Scaled Up Node
+ Observed Generation: 1
+ Reason: HorizontalScaleUp
+ Status: True
+ Type: HorizontalScaleUp
+ Last Transition Time: 2024-10-21T10:04:38Z
+ Message: patch petset; ConditionStatus:True; PodName:fr-horizontal-1
+ Observed Generation: 1
+ Status: True
+ Type: PatchPetset--fr-horizontal-1
+ Last Transition Time: 2024-10-21T10:04:43Z
+ Message: is pod ready; ConditionStatus:True; PodName:fr-horizontal-1
+ Observed Generation: 1
+ Status: True
+ Type: IsPodReady--fr-horizontal-1
+ Last Transition Time: 2024-10-21T10:04:43Z
+ Message: client failure; ConditionStatus:True; PodName:fr-horizontal-1
+ Observed Generation: 1
+ Status: True
+ Type: ClientFailure--fr-horizontal-1
+ Last Transition Time: 2024-10-21T10:04:43Z
+ Message: is node healthy; ConditionStatus:True; PodName:fr-horizontal-1
+ Observed Generation: 1
+ Status: True
+ Type: IsNodeHealthy--fr-horizontal-1
+ Last Transition Time: 2024-10-21T10:04:48Z
+ Message: patch petset; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: PatchPetset--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:04:48Z
+ Message: fr-horizontal already has desired replicas
+ Observed Generation: 1
+ Reason: HorizontalScale
+ Status: True
+ Type: HorizontalScale
+ Last Transition Time: 2024-10-21T10:04:53Z
+ Message: is pod ready; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: IsPodReady--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:04:53Z
+ Message: client failure; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: ClientFailure--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:04:53Z
+ Message: is node healthy; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: IsNodeHealthy--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:04:58Z
+ Message: Successfully updated FerretDB
+ Observed Generation: 1
+ Reason: UpdateDatabase
+ Status: True
+ Type: UpdateDatabase
+ Last Transition Time: 2024-10-21T10:04:58Z
+ Message: Successfully completed the HorizontalScaling for FerretDB
+ Observed Generation: 1
+ Reason: Successful
+ Status: True
+ Type: Successful
+ Observed Generation: 1
+ Phase: Successful
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 67s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ferretdb-horizontal-scale-up
+ Normal Starting 67s KubeDB Ops-manager Operator Pausing FerretDB database: demo/fr-horizontal
+ Normal Successful 67s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/fr-horizontal for FerretDBOpsRequest: ferretdb-horizontal-scale-up
+ Warning patch petset; ConditionStatus:True; PodName:fr-horizontal-1 59s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:fr-horizontal-1
+ Warning is pod ready; ConditionStatus:True; PodName:fr-horizontal-1 54s KubeDB Ops-manager Operator is pod ready; ConditionStatus:True; PodName:fr-horizontal-1
+ Warning client failure; ConditionStatus:True; PodName:fr-horizontal-1 54s KubeDB Ops-manager Operator client failure; ConditionStatus:True; PodName:fr-horizontal-1
+ Warning is node healthy; ConditionStatus:True; PodName:fr-horizontal-1 54s KubeDB Ops-manager Operator is node healthy; ConditionStatus:True; PodName:fr-horizontal-1
+ Warning patch petset; ConditionStatus:True; PodName:fr-horizontal-2 49s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:fr-horizontal-2
+ Warning is pod ready; ConditionStatus:True; PodName:fr-horizontal-2 44s KubeDB Ops-manager Operator is pod ready; ConditionStatus:True; PodName:fr-horizontal-2
+ Warning client failure; ConditionStatus:True; PodName:fr-horizontal-2 44s KubeDB Ops-manager Operator client failure; ConditionStatus:True; PodName:fr-horizontal-2
+ Warning is node healthy; ConditionStatus:True; PodName:fr-horizontal-2 44s KubeDB Ops-manager Operator is node healthy; ConditionStatus:True; PodName:fr-horizontal-2
+ Normal HorizontalScaleUp 39s KubeDB Ops-manager Operator Successfully Scaled Up Node
+ Normal UpdateDatabase 39s KubeDB Ops-manager Operator Successfully updated FerretDB
+ Normal Starting 39s KubeDB Ops-manager Operator Resuming FerretDB database: demo/fr-horizontal
+ Normal Successful 39s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/fr-horizontal for FerretDBOpsRequest: ferretdb-horizontal-scale-up
+```
+
+Now, we are going to verify the number of replicas this ferretdb has from the FerretDB object, number of pods the petset have,
+
+```bash
+$ kubectl get fr -n demo fr-horizontal -o json | jq '.spec.replicas'
+3
+
+$ kubectl get petset -n demo fr-horizontal -o json | jq '.spec.replicas'
+3
+```
+From all the above outputs we can see that the number of replicas of the ferretdb is `3`. That means we have successfully scaled up the replicas of the FerretDB.
+
+
+### Scale Down Replicas
+
+Here, we are going to scale down the replicas of the ferretdb to meet the desired number of replicas after scaling.
+
+#### Create FerretDBOpsRequest
+
+In order to scale down the replicas of the ferretdb, we have to create a `FerretDBOpsRequest` CR with our desired replicas. Below is the YAML of the `FerretDBOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-horizontal-scale-down
+ namespace: demo
+spec:
+ type: HorizontalScaling
+ databaseRef:
+ name: fr-horizontal
+ horizontalScaling:
+ node: 2
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `fr-horizontal` ferretdb.
+- `spec.type` specifies that we are performing `HorizontalScaling` on our ferretdb.
+- `spec.horizontalScaling.node` specifies the desired number of replicas after scaling.
+
+Let's create the `FerretDBOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/scaling/horizontal-scaling/frops-hscale-down-ops.yaml
+ferretdbopsrequest.ops.kubedb.com/ferretdb-horizontal-scale-down created
+```
+
+#### Verify replicas scaled down successfully
+
+If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `FerretDB` object and related `PetSet`.
+
+Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following command to watch `FerretDBOpsRequest` CR,
+
+```bash
+$ watch kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME TYPE STATUS AGE
+ferretdb-horizontal-scale-down HorizontalScaling Successful 40s
+```
+
+We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed to scale the ferretdb.
+
+```bash
+$ kubectl describe ferretdbopsrequest -n demo ferretdb-horizontal-scale-down
+Name: ferretdb-horizontal-scale-down
+Namespace: demo
+Labels:
+Annotations:
+API Version: ops.kubedb.com/v1alpha1
+Kind: FerretDBOpsRequest
+Metadata:
+ Creation Timestamp: 2024-10-21T10:06:42Z
+ Generation: 1
+ Resource Version: 353838
+ UID: 69cb9e8a-ec89-41e2-9e91-ce61a68044b9
+Spec:
+ Apply: IfReady
+ Database Ref:
+ Name: fr-horizontal
+ Horizontal Scaling:
+ Node: 2
+ Type: HorizontalScaling
+Status:
+ Conditions:
+ Last Transition Time: 2024-10-21T10:06:42Z
+ Message: FerretDB ops-request has started to horizontally scaling the nodes
+ Observed Generation: 1
+ Reason: HorizontalScaling
+ Status: True
+ Type: HorizontalScaling
+ Last Transition Time: 2024-10-21T10:06:45Z
+ Message: Successfully paused database
+ Observed Generation: 1
+ Reason: DatabasePauseSucceeded
+ Status: True
+ Type: DatabasePauseSucceeded
+ Last Transition Time: 2024-10-21T10:07:00Z
+ Message: Successfully Scaled Down Node
+ Observed Generation: 1
+ Reason: HorizontalScaleDown
+ Status: True
+ Type: HorizontalScaleDown
+ Last Transition Time: 2024-10-21T10:06:50Z
+ Message: patch petset; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: PatchPetset--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:06:51Z
+ Message: fr-horizontal already has desired replicas
+ Observed Generation: 1
+ Reason: HorizontalScale
+ Status: True
+ Type: HorizontalScale
+ Last Transition Time: 2024-10-21T10:06:55Z
+ Message: get pod; ConditionStatus:True; PodName:fr-horizontal-2
+ Observed Generation: 1
+ Status: True
+ Type: GetPod--fr-horizontal-2
+ Last Transition Time: 2024-10-21T10:07:00Z
+ Message: Successfully updated FerretDB
+ Observed Generation: 1
+ Reason: UpdateDatabase
+ Status: True
+ Type: UpdateDatabase
+ Last Transition Time: 2024-10-21T10:07:00Z
+ Message: Successfully completed the HorizontalScaling for FerretDB
+ Observed Generation: 1
+ Reason: Successful
+ Status: True
+ Type: Successful
+ Observed Generation: 1
+ Phase: Successful
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 55s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ferretdb-horizontal-scale-down
+ Normal Starting 55s KubeDB Ops-manager Operator Pausing FerretDB database: demo/fr-horizontal
+ Normal Successful 55s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/fr-horizontal for FerretDBOpsRequest: ferretdb-horizontal-scale-down
+ Warning patch petset; ConditionStatus:True; PodName:fr-horizontal-2 47s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:fr-horizontal-2
+ Warning get pod; ConditionStatus:True; PodName:fr-horizontal-2 42s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:fr-horizontal-2
+ Normal HorizontalScaleDown 37s KubeDB Ops-manager Operator Successfully Scaled Down Node
+ Normal UpdateDatabase 37s KubeDB Ops-manager Operator Successfully updated FerretDB
+ Normal Starting 37s KubeDB Ops-manager Operator Resuming FerretDB database: demo/fr-horizontal
+ Normal Successful 37s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/fr-horizontal for FerretDBOpsRequest: ferretdb-horizontal-scale-down
+```
+
+Now, we are going to verify the number of replicas this ferretdb has from the FerretDB object, number of pods the petset have,
+
+```bash
+$ kubectl get fr -n demo fr-horizontal -o json | jq '.spec.replicas'
+2
+
+$ kubectl get petset -n demo fr-horizontal -o json | jq '.spec.replicas'
+2
+```
+From all the above outputs we can see that the number of replicas of the ferretdb is `2`. That means we have successfully scaled down the replicas of the FerretDB.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete fr -n demo fr-horizontal
+kubectl delete ferretdbopsrequest -n demo ferretdb-horizontal-scale-up ferretdb-horizontal-scale-down
+```
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/horizontal-scaling/overview.md b/docs/guides/ferretdb/scaling/horizontal-scaling/overview.md
new file mode 100644
index 0000000000..b177fdc2d4
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/horizontal-scaling/overview.md
@@ -0,0 +1,54 @@
+---
+title: FerretDB Horizontal Scaling Overview
+menu:
+ docs_{{ .version }}:
+ identifier: fr-horizontal-scaling-overview
+ name: Overview
+ parent: fr-horizontal-scaling
+ weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# FerretDB Horizontal Scaling
+
+This guide will give an overview on how KubeDB Ops-manager operator scales up or down `FerretDB` replicas of PetSet.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+
+## How Horizontal Scaling Process Works
+
+The following diagram shows how KubeDB Ops-manager operator scales up or down `FerretDB` database components. Open the image in a new tab to see the enlarged version.
+
+
+
+The Horizontal scaling process consists of the following steps:
+
+1. At first, a user creates a `FerretDB` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `FerretDB` CR.
+
+3. When the operator finds a `FerretDB` CR, it creates `PetSet` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to scale the `PetSet` of the `FerretDB` database the user creates a `FerretDBOpsRequest` CR with desired information.
+
+5. `KubeDB` Ops-manager operator watches the `FerretDBOpsRequest` CR.
+
+6. When it finds a `FerretDBOpsRequest` CR, it pauses the `FerretDB` object which is referred from the `FerretDBOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `FerretDB` object during the horizontal scaling process.
+
+7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `FerretDBOpsRequest` CR.
+
+8. After successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `FerretDB` object to reflect the updated state.
+
+9. After the successful scaling of the `FerretDB` replicas, the `KubeDB` Ops-manager operator resumes the `FerretDB` object so that the `KubeDB` Provisioner operator resumes its usual operations.
+
+In the next docs, we are going to show a step-by-step guide on horizontal scaling of FerretDB using `FerretDBOpsRequest` CRD.
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/vertical-scaling/_index.md b/docs/guides/ferretdb/scaling/vertical-scaling/_index.md
new file mode 100644
index 0000000000..2597e01702
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/vertical-scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Vertical Scaling
+menu:
+ docs_{{ .version }}:
+ identifier: fr-vertical-scaling
+ name: Vertical Scaling
+ parent: fr-scaling
+ weight: 20
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/vertical-scaling/overview.md b/docs/guides/ferretdb/scaling/vertical-scaling/overview.md
new file mode 100644
index 0000000000..f5d5f369c1
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/vertical-scaling/overview.md
@@ -0,0 +1,54 @@
+---
+title: FerretDB Vertical Scaling Overview
+menu:
+ docs_{{ .version }}:
+ identifier: fr-vertical-scaling-overview
+ name: Overview
+ parent: fr-vertical-scaling
+ weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# FerretDB Vertical Scaling
+
+This guide will give an overview on how KubeDB Ops-manager operator updates the resources(for example CPU and Memory etc.) of the `FerretDB`.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+
+## How Vertical Scaling Process Works
+
+The following diagram shows how KubeDB Ops-manager operator updates the resources of the `FerretDB`. Open the image in a new tab to see the enlarged version.
+
+
+
+The vertical scaling process consists of the following steps:
+
+1. At first, a user creates a `FerretDB` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `FerretDB` CR.
+
+3. When the operator finds a `FerretDB` CR, it creates `PetSet` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to update the resources(for example `CPU`, `Memory` etc.) of the `FerretDB`, the user creates a `FerretDBOpsRequest` CR with desired information.
+
+5. `KubeDB` Ops-manager operator watches the `FerretDBOpsRequest` CR.
+
+6. When it finds a `FerretDBOpsRequest` CR, it pauses the `FerretDB` object which is referred from the `FerretDBOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `FerretDB` object during the vertical scaling process.
+
+7. Then the `KubeDB` Ops-manager operator will update resources of the PetSet to reach desired state.
+
+8. After the successful update of the resources of the PetSet's replica, the `KubeDB` Ops-manager operator updates the `FerretDB` object to reflect the updated state.
+
+9. After the successful update of the `FerretDB` resources, the `KubeDB` Ops-manager operator resumes the `FerretDB` object so that the `KubeDB` Provisioner operator resumes its usual operations.
+
+In the next docs, we are going to show a step-by-step guide on updating resources of FerretDB using `FerretDBOpsRequest` CRD.
\ No newline at end of file
diff --git a/docs/guides/ferretdb/scaling/vertical-scaling/vertical-ops.md b/docs/guides/ferretdb/scaling/vertical-scaling/vertical-ops.md
new file mode 100644
index 0000000000..b22b46ea70
--- /dev/null
+++ b/docs/guides/ferretdb/scaling/vertical-scaling/vertical-ops.md
@@ -0,0 +1,282 @@
+---
+title: Vertical Scaling FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-vertical-scaling-ops
+ name: VerticalScaling OpsRequest
+ parent: fr-vertical-scaling
+ weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Vertical Scale FerretDB
+
+This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a FerretDB.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+ - [Vertical Scaling Overview](/docs/guides/ferretdb/scaling/vertical-scaling/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/ferretdb](/docs/examples/ferretdb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Apply Vertical Scaling on FerretDB
+
+Here, we are going to deploy a `FerretDB` using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it.
+
+### Prepare FerretDB
+
+Now, we are going to deploy a `FerretDB` with version `1.23.0`.
+
+### Deploy FerretDB
+
+In this section, we are going to deploy a FerretDB. Then, in the next section we will update the resources using `FerretDBOpsRequest` CRD. Below is the YAML of the `FerretDB` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-vertical
+ namespace: demo
+spec:
+ version: "1.23.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
+```
+
+Let's create the `FerretDB` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/scaling/fr-vertical.yaml
+ferretdb.kubedb.com/fr-vertical created
+```
+
+Now, wait until `fr-vertical` has status `Ready`. i.e,
+
+```bash
+$ kubectl get fr -n demo
+NAME TYPE VERSION STATUS AGE
+fr-vertical kubedb.com/v1alpha2 1.23.0 Ready 17s
+```
+
+Let's check the Pod containers resources,
+
+```bash
+$ kubectl get pod -n demo fr-vertical-0 -o json | jq '.spec.containers[].resources'
+{
+ "limits": {
+ "memory": "1Gi"
+ },
+ "requests": {
+ "cpu": "500m",
+ "memory": "1Gi"
+ }
+}
+```
+
+You can see the Pod has default resources which is assigned by the KubeDB operator.
+
+We are now ready to apply the `FerretDBOpsRequest` CR to update the resources of this ferretdb.
+
+### Vertical Scaling
+
+Here, we are going to update the resources of the ferretdb to meet the desired resources after scaling.
+
+#### Create FerretDBOpsRequest
+
+In order to update the resources of the ferretdb, we have to create a `FerretDBOpsRequest` CR with our desired resources. Below is the YAML of the `FerretDBOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-scale-vertical
+ namespace: demo
+spec:
+ type: VerticalScaling
+ databaseRef:
+ name: fr-vertical
+ verticalScaling:
+ node:
+ resources:
+ requests:
+ memory: "2Gi"
+ cpu: "1"
+ limits:
+ memory: "2Gi"
+ cpu: "1"
+ timeout: 5m
+ apply: IfReady
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `fr-vertical` ferretdb.
+- `spec.type` specifies that we are performing `VerticalScaling` on our database.
+- `spec.verticalScaling.node` specifies the desired resources after scaling.
+- Have a look [here](/docs/guides/ferretdb/concepts/opsrequest.md) on the respective sections to understand the `timeout` & `apply` fields.
+
+Let's create the `FerretDBOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/scaling/vertical-scaling/fr-vertical-ops.yaml
+ferretdbopsrequest.ops.kubedb.com/ferretdb-scale-vertical created
+```
+
+#### Verify FerretDB resources updated successfully
+
+If everything goes well, `KubeDB` Ops-manager operator will update the resources of `FerretDB` object and related `PetSet` and `Pods`.
+
+Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following command to watch `FerretDBOpsRequest` CR,
+
+```bash
+$ kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME TYPE STATUS AGE
+ferretdb-scale-vertical VerticalScaling Successful 44s
+```
+
+We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed to scale the ferretdb.
+
+```bash
+$ kubectl describe ferretdbopsrequest -n demo ferretdb-scale-vertical
+Name: ferretdb-scale-vertical
+Namespace: demo
+Labels:
+Annotations:
+API Version: ops.kubedb.com/v1alpha1
+Kind: FerretDBOpsRequest
+Metadata:
+ Creation Timestamp: 2024-10-21T12:25:33Z
+ Generation: 1
+ Resource Version: 366310
+ UID: 38631646-684f-4c2a-8496-c7b085743243
+Spec:
+ Apply: IfReady
+ Database Ref:
+ Name: fr-vertical
+ Timeout: 5m
+ Type: VerticalScaling
+ Vertical Scaling:
+ Node:
+ Resources:
+ Limits:
+ Cpu: 1
+ Memory: 2Gi
+ Requests:
+ Cpu: 1
+ Memory: 2Gi
+Status:
+ Conditions:
+ Last Transition Time: 2024-10-21T12:25:33Z
+ Message: FerretDB ops-request has started to vertically scaling the FerretDB nodes
+ Observed Generation: 1
+ Reason: VerticalScaling
+ Status: True
+ Type: VerticalScaling
+ Last Transition Time: 2024-10-21T12:25:36Z
+ Message: Successfully paused database
+ Observed Generation: 1
+ Reason: DatabasePauseSucceeded
+ Status: True
+ Type: DatabasePauseSucceeded
+ Last Transition Time: 2024-10-21T12:25:37Z
+ Message: Successfully updated PetSets Resources
+ Observed Generation: 1
+ Reason: UpdatePetSets
+ Status: True
+ Type: UpdatePetSets
+ Last Transition Time: 2024-10-21T12:25:42Z
+ Message: get pod; ConditionStatus:True; PodName:fr-vertical-0
+ Observed Generation: 1
+ Status: True
+ Type: GetPod--fr-vertical-0
+ Last Transition Time: 2024-10-21T12:25:42Z
+ Message: evict pod; ConditionStatus:True; PodName:fr-vertical-0
+ Observed Generation: 1
+ Status: True
+ Type: EvictPod--fr-vertical-0
+ Last Transition Time: 2024-10-21T12:25:47Z
+ Message: check pod running; ConditionStatus:True; PodName:fr-vertical-0
+ Observed Generation: 1
+ Status: True
+ Type: CheckPodRunning--fr-vertical-0
+ Last Transition Time: 2024-10-21T12:25:52Z
+ Message: Successfully Restarted Pods With Resources
+ Observed Generation: 1
+ Reason: RestartPods
+ Status: True
+ Type: RestartPods
+ Last Transition Time: 2024-10-21T12:25:52Z
+ Message: Successfully completed the VerticalScaling for FerretDB
+ Observed Generation: 1
+ Reason: Successful
+ Status: True
+ Type: Successful
+ Observed Generation: 1
+ Phase: Successful
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 58s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ferretdb-scale-vertical
+ Normal Starting 58s KubeDB Ops-manager Operator Pausing FerretDB database: demo/fr-vertical
+ Normal Successful 58s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/fr-vertical for FerretDBOpsRequest: ferretdb-scale-vertical
+ Normal UpdatePetSets 54s KubeDB Ops-manager Operator Successfully updated PetSets Resources
+ Warning get pod; ConditionStatus:True; PodName:fr-vertical-0 49s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:fr-vertical-0
+ Warning evict pod; ConditionStatus:True; PodName:fr-vertical-0 49s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:fr-vertical-0
+ Warning check pod running; ConditionStatus:True; PodName:fr-vertical-0 44s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:fr-vertical-0
+ Normal RestartPods 39s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources
+ Normal Starting 39s KubeDB Ops-manager Operator Resuming FerretDB database: demo/fr-vertical
+ Normal Successful 39s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/fr-vertical for FerretDBOpsRequest: ferretdb-scale-vertical
+```
+
+Now, we are going to verify from the Pod yaml whether the resources of the ferretdb has updated to meet up the desired state, Let's check,
+
+```bash
+$ kubectl get pod -n demo fr-vertical-0 -o json | jq '.spec.containers[].resources'
+{
+ "limits": {
+ "cpu": "1",
+ "memory": "2Gi"
+ },
+ "requests": {
+ "cpu": "1",
+ "memory": "2Gi"
+ }
+}
+```
+
+The above output verifies that we have successfully scaled up the resources of the FerretDB.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete fr -n demo fr-vertical
+kubectl delete ferretdbopsrequest -n demo ferretdb-scale-vertical
+```
\ No newline at end of file
diff --git a/docs/guides/ferretdb/tls/configure_tls.md b/docs/guides/ferretdb/tls/configure_tls.md
index fbcb9a6d04..82480afc9b 100644
--- a/docs/guides/ferretdb/tls/configure_tls.md
+++ b/docs/guides/ferretdb/tls/configure_tls.md
@@ -3,7 +3,7 @@ title: FerretDB TLS/SSL Encryption
menu:
docs_{{ .version }}:
identifier: fr-tls-configure
- name: FerretDB_SSL
+ name: FerretDB TLS/SSL Configuration
parent: fr-tls
weight: 20
menu_name: docs_{{ .version }}
@@ -131,10 +131,10 @@ spec:
```bash
$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/tls/ferretdb-tls.yaml
-ferretdb.kubedb.com/pp-tls created
+ferretdb.kubedb.com/fr-tls created
```
-Now, wait until `pp-tls created` has status `Ready`. i.e,
+Now, wait until `fr-tls` has status `Ready`. i.e,
```bash
$ watch kubectl get fr -n demo
@@ -214,7 +214,7 @@ For mongosh info see: https://docs.mongodb.com/mongodb-shell/
------
The server generated these startup warnings when booting
- 2024-03-12T05:56:50.979Z: Powered by FerretDB v1.18.0 and PostgreSQL 13.13 on x86_64-pc-linux-musl, compiled by gcc.
+ 2024-03-12T05:56:50.979Z: Powered by FerretDB v1.23.0 and PostgreSQL 13.13 on x86_64-pc-linux-musl, compiled by gcc.
2024-03-12T05:56:50.979Z: Please star us on GitHub: https://github.com/FerretDB/FerretDB.
2024-03-12T05:56:50.979Z: The telemetry state is undecided.
2024-03-12T05:56:50.979Z: Read more about FerretDB telemetry and how to opt out at https://beacon.ferretdb.io.
diff --git a/docs/guides/ferretdb/update-version/_index.md b/docs/guides/ferretdb/update-version/_index.md
new file mode 100644
index 0000000000..e0266958ab
--- /dev/null
+++ b/docs/guides/ferretdb/update-version/_index.md
@@ -0,0 +1,10 @@
+---
+title: Updating FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-updating
+ name: UpdateVersion
+ parent: fr-ferretdb-guides
+ weight: 42
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/ferretdb/update-version/overview.md b/docs/guides/ferretdb/update-version/overview.md
new file mode 100644
index 0000000000..0b20a2e9b9
--- /dev/null
+++ b/docs/guides/ferretdb/update-version/overview.md
@@ -0,0 +1,54 @@
+---
+title: Updating FerretDB Overview
+menu:
+ docs_{{ .version }}:
+ identifier: fr-updating-overview
+ name: Overview
+ parent: fr-updating
+ weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# updating FerretDB version Overview
+
+This guide will give you an overview on how KubeDB Ops-manager operator updates the version of `FerretDB`.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+
+## How update version Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator updates the version of `FerretDB`. Open the image in a new tab to see the enlarged version.
+
+
+
+The updating process consists of the following steps:
+
+1. At first, a user creates a `FerretDB` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `FerretDB` CR.
+
+3. When the operator finds a `FerretDB` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to update the version of the `FerretDB` the user creates a `FerretDBOpsRequest` CR with the desired version.
+
+5. `KubeDB` Ops-manager operator watches the `FerretDBOpsRequest` CR.
+
+6. When it finds a `FerretDBOpsRequest` CR, it halts the `FerretDB` object which is referred from the `FerretDBOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `FerretDB` object during the updating process.
+
+7. By looking at the target version from `FerretDBOpsRequest` CR, `KubeDB` Ops-manager operator updates the image of the `PetSet`.
+
+8. After successfully updating the `PetSet` and their `Pods` images, the `KubeDB` Ops-manager operator updates the image of the `FerretDB` object to reflect the updated state of the database.
+
+9. After successfully updating of `FerretDB` object, the `KubeDB` Ops-manager operator resumes the `FerretDB` object so that the `KubeDB` Provisioner operator can resume its usual operations.
+
+In the next doc, we are going to show a step-by-step guide on updating a FerretDB using the updateVersion operation.
\ No newline at end of file
diff --git a/docs/guides/ferretdb/update-version/update-version.md b/docs/guides/ferretdb/update-version/update-version.md
new file mode 100644
index 0000000000..d84da9d671
--- /dev/null
+++ b/docs/guides/ferretdb/update-version/update-version.md
@@ -0,0 +1,241 @@
+---
+title: Updating FerretDB
+menu:
+ docs_{{ .version }}:
+ identifier: fr-updating-ferretdb
+ name: Update version
+ parent: fr-updating
+ weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# update version of FerretDB
+
+This guide will show you how to use `KubeDB` Ops-manager operator to update the version of `FerretDB`.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+ - [FerretDB](/docs/guides/ferretdb/concepts/ferretdb.md)
+ - [FerretDBOpsRequest](/docs/guides/ferretdb/concepts/opsrequest.md)
+ - [Updating Overview](/docs/guides/ferretdb/update-version/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/ferretdb](/docs/examples/ferretdb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+### Prepare FerretDB
+
+Now, we are going to deploy a `FerretDB` with version `1.18.0`.
+
+### Deploy FerretDB:
+
+In this section, we are going to deploy a FerretDB. Then, in the next section we will update the version using `FerretDBOpsRequest` CRD. Below is the YAML of the `FerretDB` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: FerretDB
+metadata:
+ name: fr-update
+ namespace: demo
+spec:
+ version: "1.18.0"
+ replicas: 1
+ backend:
+ externallyManaged: false
+ storage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Mi
+ deletionPolicy: WipeOut
+```
+
+Let's create the `FerretDB` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/update-version/fr-update.yaml
+ferretdb.kubedb.com/fr-update created
+```
+
+Now, wait until `fr-update` has status `Ready`. i.e,
+
+```bash
+$ kubectl get fr -n demo
+ NAME TYPE VERSION STATUS AGE
+ fr-update kubedb.com/v1alpha2 1.18.0 Ready 26s
+```
+
+We are now ready to apply the `FerretDBOpsRequest` CR to update this FerretDB.
+
+### update FerretDB Version
+
+Here, we are going to update `FerretDB` from `1.18.0` to `1.23.0`.
+
+#### Create FerretDBOpsRequest:
+
+In order to update the FerretDB, we have to create a `FerretDBOpsRequest` CR with your desired version that is supported by `KubeDB`. Below is the YAML of the `FerretDBOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: FerretDBOpsRequest
+metadata:
+ name: ferretdb-version-update
+ namespace: demo
+spec:
+ type: UpdateVersion
+ databaseRef:
+ name: fr-update
+ updateVersion:
+    targetVersion: "1.23.0"
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing operation on `fr-update` FerretDB.
+- `spec.type` specifies that we are going to perform `UpdateVersion` on our FerretDB.
+- `spec.updateVersion.targetVersion` specifies the expected version of the FerretDB `1.23.0`.
+
+
+Let's create the `FerretDBOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ferretdb/update-version/frops-update.yaml
+ferretdbopsrequest.ops.kubedb.com/ferretdb-version-update created
+```
+
+#### Verify FerretDB version updated successfully :
+
+If everything goes well, `KubeDB` Ops-manager operator will update the image of `FerretDB` object and related `PetSets` and `Pods`.
+
+Let's wait for `FerretDBOpsRequest` to be `Successful`. Run the following command to watch `FerretDBOpsRequest` CR,
+
+```bash
+$ watch kubectl get ferretdbopsrequest -n demo
+Every 2.0s: kubectl get ferretdbopsrequest -n demo
+NAME TYPE STATUS AGE
+ferretdb-version-update UpdateVersion Successful 93s
+```
+
+We can see from the above output that the `FerretDBOpsRequest` has succeeded. If we describe the `FerretDBOpsRequest` we will get an overview of the steps that were followed to update the FerretDB.
+
+```bash
+$ kubectl describe ferretdbopsrequest -n demo ferretdb-version-update
+Name: ferretdb-version-update
+Namespace: demo
+Labels:
+Annotations:
+API Version: ops.kubedb.com/v1alpha1
+Kind: FerretDBOpsRequest
+Metadata:
+ Creation Timestamp: 2024-10-21T05:06:17Z
+ Generation: 1
+ Resource Version: 324860
+ UID: 30d486a6-a8fe-4d82-a8b3-f13e299ef035
+Spec:
+ Apply: IfReady
+ Database Ref:
+ Name: fr-update
+ Type: UpdateVersion
+ Update Version:
+ Target Version: 1.23.0
+Status:
+ Conditions:
+ Last Transition Time: 2024-10-21T05:06:17Z
+ Message: FerretDB ops-request has started to update version
+ Observed Generation: 1
+ Reason: UpdateVersion
+ Status: True
+ Type: UpdateVersion
+ Last Transition Time: 2024-10-21T05:06:25Z
+ Message: successfully reconciled the FerretDB with updated version
+ Observed Generation: 1
+ Reason: UpdatePetSets
+ Status: True
+ Type: UpdatePetSets
+ Last Transition Time: 2024-10-21T05:06:30Z
+ Message: get pod; ConditionStatus:True; PodName:fr-update-0
+ Observed Generation: 1
+ Status: True
+ Type: GetPod--fr-update-0
+ Last Transition Time: 2024-10-21T05:06:30Z
+ Message: evict pod; ConditionStatus:True; PodName:fr-update-0
+ Observed Generation: 1
+ Status: True
+ Type: EvictPod--fr-update-0
+ Last Transition Time: 2024-10-21T05:06:35Z
+ Message: check pod running; ConditionStatus:True; PodName:fr-update-0
+ Observed Generation: 1
+ Status: True
+ Type: CheckPodRunning--fr-update-0
+ Last Transition Time: 2024-10-21T05:06:40Z
+ Message: Successfully Restarted FerretDB pods
+ Observed Generation: 1
+ Reason: RestartPods
+ Status: True
+ Type: RestartPods
+ Last Transition Time: 2024-10-21T05:06:40Z
+ Message: Successfully updated FerretDB
+ Observed Generation: 1
+ Reason: UpdateDatabase
+ Status: True
+ Type: UpdateDatabase
+ Last Transition Time: 2024-10-21T05:06:40Z
+ Message: Successfully updated FerretDB version
+ Observed Generation: 1
+ Reason: Successful
+ Status: True
+ Type: Successful
+ Observed Generation: 1
+ Phase: Successful
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Starting 59s KubeDB Ops-manager Operator Start processing for FerretDBOpsRequest: demo/ferretdb-version-update
+ Normal Starting 59s KubeDB Ops-manager Operator Pausing FerretDB database: demo/fr-update
+ Normal Successful 59s KubeDB Ops-manager Operator Successfully paused FerretDB database: demo/fr-update for FerretDBOpsRequest: ferretdb-version-update
+ Normal UpdatePetSets 51s KubeDB Ops-manager Operator successfully reconciled the FerretDB with updated version
+ Warning get pod; ConditionStatus:True; PodName:fr-update-0 46s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:fr-update-0
+ Warning evict pod; ConditionStatus:True; PodName:fr-update-0 46s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:fr-update-0
+ Warning check pod running; ConditionStatus:True; PodName:fr-update-0 41s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:fr-update-0
+ Normal RestartPods 36s KubeDB Ops-manager Operator Successfully Restarted FerretDB pods
+ Normal Starting 36s KubeDB Ops-manager Operator Resuming FerretDB database: demo/fr-update
+ Normal Successful 36s KubeDB Ops-manager Operator Successfully resumed FerretDB database: demo/fr-update for FerretDBOpsRequest: ferretdb-version-update
+```
+
+Now, we are going to verify whether the `FerretDB` and the related `PetSets` their `Pods` have the new version image. Let's check,
+
+```bash
+$ kubectl get fr -n demo fr-update -o=jsonpath='{.spec.version}{"\n"}'
+1.23.0
+
+$ kubectl get petset -n demo fr-update -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
+ghcr.io/appscode-images/ferretdb:1.23.0
+
+$ kubectl get pods -n demo fr-update-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}'
+ghcr.io/appscode-images/ferretdb:1.23.0
+```
+
+You can see from above, our `FerretDB` has been updated with the new version. So, the update process is successfully completed.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete fr -n demo fr-update
+kubectl delete ferretdbopsrequest -n demo ferretdb-version-update
+```
\ No newline at end of file
diff --git a/docs/guides/pgpool/update-version/update_version.md b/docs/guides/pgpool/update-version/update_version.md
index 7466e0d713..a63c0c9485 100644
--- a/docs/guides/pgpool/update-version/update_version.md
+++ b/docs/guides/pgpool/update-version/update_version.md
@@ -228,9 +228,9 @@ $ kubectl get pp -n demo pp-update -o=jsonpath='{.spec.version}{"\n"}'
4.5.0
$ kubectl get petset -n demo pp-update -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
-mongo:4.0.5
+ghcr.io/appscode-images/pgpool2:4.5.0
-$ kubectl get pods -n demo mg-standalone-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}'
+$ kubectl get pods -n demo pp-update-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}'
ghcr.io/appscode-images/pgpool2:4.5.0@sha256:2697fcad9e11bdc704f6ae0fba85c4451c6b0243140aaaa33e719c3af548bda1
```
diff --git a/docs/images/ferretdb/fr-builtin-prom-target.png b/docs/images/ferretdb/fr-builtin-prom-target.png
new file mode 100644
index 0000000000..a483bb5faf
Binary files /dev/null and b/docs/images/ferretdb/fr-builtin-prom-target.png differ
diff --git a/docs/images/ferretdb/fr-horizontal-scaling.svg b/docs/images/ferretdb/fr-horizontal-scaling.svg
new file mode 100644
index 0000000000..c7349f3849
--- /dev/null
+++ b/docs/images/ferretdb/fr-horizontal-scaling.svg
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/docs/images/ferretdb/fr-update.svg b/docs/images/ferretdb/fr-update.svg
new file mode 100644
index 0000000000..5c28780bf3
--- /dev/null
+++ b/docs/images/ferretdb/fr-update.svg
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/docs/images/ferretdb/fr-vertical-scaling.svg b/docs/images/ferretdb/fr-vertical-scaling.svg
new file mode 100644
index 0000000000..c7349f3849
--- /dev/null
+++ b/docs/images/ferretdb/fr-vertical-scaling.svg
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file