diff --git a/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml b/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml new file mode 100644 index 0000000000..be2241531d --- /dev/null +++ b/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rm-cluster + namespace: demo +spec: + version: "3.13.2" + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configSecret: + name: rabbit-custom-config \ No newline at end of file diff --git a/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml b/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml new file mode 100644 index 0000000000..ee47631df1 --- /dev/null +++ b/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: RabbitMQOpsRequest +metadata: + name: reconfigure-rm-cluster + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: rm-cluster + configuration: + configSecret: + name: new-custom-config + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/rabbitmq/opsrequests/rabbitmq-reconfigure-apply.yaml b/docs/examples/rabbitmq/opsrequests/rabbitmq-reconfigure-apply.yaml new file mode 100644 index 0000000000..f838f1f3a8 --- /dev/null +++ b/docs/examples/rabbitmq/opsrequests/rabbitmq-reconfigure-apply.yaml @@ -0,0 +1,15 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: RabbitMQOpsRequest +metadata: + name: reconfigure-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: rm-cluster + configuration: + applyConfig: + rabbitmq.conf: | + default_vhost = /newvhost + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/rabbitmq/quickstart/quickstart.yaml b/docs/examples/rabbitmq/quickstart/quickstart.yaml index 5933e579ed..0a0503004e 100644 --- a/docs/examples/rabbitmq/quickstart/quickstart.yaml +++ b/docs/examples/rabbitmq/quickstart/quickstart.yaml @@ -4,7 +4,7 @@ metadata: name: rm-quickstart namespace: demo spec: - version: "3.12.12" + version: "3.13.2" replicas: 3 storage: accessModes: diff --git a/docs/guides/rabbitmq/autoscaler/_index.md b/docs/guides/rabbitmq/autoscaler/_index.md index bc2c9a589e..2a6ebd50ff 100644 --- a/docs/guides/rabbitmq/autoscaler/_index.md +++ b/docs/guides/rabbitmq/autoscaler/_index.md @@ -2,9 +2,9 @@ title: Autoscaling menu: docs_{{ .version }}: - identifier: mg-auto-scaling + identifier: rm-autoscaling name: Autoscaling - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 46 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/autoscaler/compute/_index.md b/docs/guides/rabbitmq/autoscaler/compute/_index.md index 31a2328359..59e764ac91 100644 --- a/docs/guides/rabbitmq/autoscaler/compute/_index.md +++ b/docs/guides/rabbitmq/autoscaler/compute/_index.md @@ -2,9 +2,9 @@ title: Compute Autoscaling menu: docs_{{ .version }}: - identifier: mg-compute-auto-scaling + identifier: rm-autoscaling-compute name: Compute Autoscaling - parent: mg-auto-scaling + parent: rm-autoscaling weight: 46 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/autoscaler/compute/compute-autoscale.md b/docs/guides/rabbitmq/autoscaler/compute/compute-autoscale.md new file mode 100644 index 0000000000..c07979f3c8 --- /dev/null +++ b/docs/guides/rabbitmq/autoscaler/compute/compute-autoscale.md @@ -0,0 +1,440 @@ +--- +title: RabbitMQ 
Compute Resource Autoscaling +menu: +  docs_{{ .version }}: +    identifier: rm-autoscaling-compute-description +    name: rabbitmq-compute +    parent: rm-autoscaling +    weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Autoscaling the Compute Resource of a RabbitMQ + +This guide will show you how to use `KubeDB` to autoscale compute resources, i.e. cpu and memory, of a RabbitMQ. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation). + +- You should be familiar with the following `KubeDB` concepts: +  - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) +  - [RabbitMQAutoscaler](/docs/guides/rabbitmq/concepts/autoscaler.md) +  - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) +  - [Compute Resource Autoscaling Overview](/docs/guides/rabbitmq/autoscaler/compute/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in the [docs/examples/rabbitmq](/docs/examples/rabbitmq) directory of the [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Autoscaling of RabbitMQ + +In this section, we are going to deploy a RabbitMQ with version `3.13.2`. Then, in the next section, we will set up autoscaling for this RabbitMQ using the `RabbitMQAutoscaler` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: +  name: rabbitmq-autoscale +  namespace: demo +spec: +  version: "3.13.2" +  replicas: 1 +  storage: +    accessModes: +      - ReadWriteOnce +    resources: +      requests: +        storage: 1Gi +    storageClassName: standard +  storageType: Durable +  deletionPolicy: WipeOut +  podTemplate: +    spec: +      containers: +      - name: rabbitmq +        resources: +          requests: +            cpu: "0.5m" +            memory: "1Gi" +          limits: +            cpu: "1" +            memory: "2Gi" +  serviceTemplates: +    - alias: primary +      spec: +        type: LoadBalancer +``` + +Let's create the `RabbitMQ` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/autoscaling/compute/rabbitmq-autoscale.yaml +rabbitmq.kubedb.com/rabbitmq-autoscale created +``` + +Now, wait until `rabbitmq-autoscale` has status `Ready`, i.e., + +```bash +$ kubectl get rm -n demo +NAME                 TYPE                  VERSION   STATUS   AGE +rabbitmq-autoscale   kubedb.com/v1alpha2   3.13.2    Ready    22s +``` + +Let's check the Pod's container resources, + +```bash +$ kubectl get pod -n demo rabbitmq-autoscale-0 -o json | jq '.spec.containers[].resources' +{ +  "limits": { +    "cpu": "1", +    "memory": "2Gi" +  }, +  "requests": { +    "cpu": "0.5m", +    "memory": "1Gi" +  } +} +``` + +Let's check the RabbitMQ resources, +```bash +$ kubectl get rabbitmq -n demo rabbitmq-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources' +{ +  "limits": { +    "cpu": "1", +    "memory": "2Gi" +  }, +  "requests": { +    "cpu": "0.5m", +    "memory": "1Gi" +  } +} +``` + +You can see from the above outputs that the resources are the same as the ones we assigned while deploying the RabbitMQ.
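Because the autoscaler's recommendations are computed from the actual cpu and memory usage of the pods, it can be helpful to keep an eye on the live usage while following the rest of this guide. A quick way to do that, assuming the Metrics Server installed in the prerequisites is working:

```bash
# Live cpu/memory usage of the RabbitMQ pod (served by Metrics Server)
$ kubectl top pod -n demo rabbitmq-autoscale-0
```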
+ +We are now ready to apply the `RabbitMQAutoscaler` CRO to set up autoscaling for this database. + +### Compute Resource Autoscaling + +Here, we are going to set up compute (cpu and memory) autoscaling using a RabbitMQAutoscaler object. + +#### Create RabbitMQAutoscaler Object + +In order to set up compute resource autoscaling for this RabbitMQ, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: RabbitMQAutoscaler +metadata: +  name: rabbitmq-autoscale-ops +  namespace: demo +spec: +  databaseRef: +    name: rabbitmq-autoscale +  compute: +    rabbitmq: +      trigger: "On" +      podLifeTimeThreshold: 5m +      resourceDiffPercentage: 20 +      minAllowed: +        cpu: 600m +        memory: 1.2Gi +      maxAllowed: +        cpu: 1 +        memory: 2Gi +      controlledResources: ["cpu", "memory"] +      containerControlledValues: "RequestsAndLimits" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing compute resource autoscaling on `rabbitmq-autoscale`. +- `spec.compute.rabbitmq.trigger` specifies that compute resource autoscaling is enabled for this RabbitMQ. +- `spec.compute.rabbitmq.podLifeTimeThreshold` specifies the minimum lifetime of at least one pod before a vertical scaling is initiated. +- `spec.compute.rabbitmq.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. +  If the difference between the current and the recommended resources is less than `resourceDiffPercentage`, the Autoscaler operator will ignore the update. For example, with the value `20` used here, a recommendation that differs from the current resources by less than 20% will not produce an ops request. +- `spec.compute.rabbitmq.minAllowed` specifies the minimum allowed resources for this RabbitMQ. +- `spec.compute.rabbitmq.maxAllowed` specifies the maximum allowed resources for this RabbitMQ. +- `spec.compute.rabbitmq.controlledResources` specifies the resources that are controlled by the autoscaler. +- `spec.compute.rabbitmq.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". +- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It is not set in the example above; see the sketch below. It has 2 fields. Know more about them here: [timeout](/docs/guides/rabbitmq/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/rabbitmq/concepts/opsrequest.md#specapply).
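The `RabbitMQAutoscaler` above leaves `spec.opsRequestOptions` unset, so the operator falls back to its defaults (the autoscaler's describe output below reports `Apply: IfReady`). If you want to set these options explicitly, a minimal sketch of the optional block, placed under `spec` of the autoscaler, could look like the following; the `timeout` value is purely illustrative:

```yaml
# Optional: goes under `spec` of the RabbitMQAutoscaler shown above
opsRequestOptions:
  timeout: 3m     # illustrative: how long the generated ops request is allowed to take
  apply: IfReady  # apply the generated ops request only while the database is Ready
```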
+ +Let's create the `RabbitMQAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/autoscaling/compute/rabbitmq-autoscaler.yaml +rabbitmqautoscaler.autoscaling.kubedb.com/rabbitmq-autoscaler-ops created +``` + +#### Verify Autoscaling is set up successfully + +Let's check that the `rabbitmqautoscaler` resource is created successfully, + +```bash +$ kubectl get rabbitmqautoscaler -n demo +NAME AGE +rabbitmq-autoscale-ops 6m55s + +$ kubectl describe rabbitmqautoscaler rabbitmq-autoscale-ops -n demo +Name: rabbitmq-autoscale-ops +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: RabbitMQAutoscaler +Metadata: + Creation Timestamp: 2024-07-17T12:09:17Z + Generation: 1 + Resource Version: 81569 + UID: 3841c30b-3b19-4740-82f5-bf8e257ddc18 +Spec: + Compute: + rabbitmq: + Container Controlled Values: RequestsAndLimits + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1 + Memory: 1Gi + Min Allowed: + Cpu: 600m + Memory: 1.2Gi + Pod Life Time Threshold: 5m0s + Resource Diff Percentage: 20 + Trigger: On + Database Ref: + Name: rabbitmq-autoscale + Ops Request Options: + Apply: IfReady +Status: + Checkpoints: + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 10000 + Reference Timestamp: 2024-07-17T12:10:00Z + Total Weight: 0.8733542386168607 + First Sample Start: 2024-07-17T12:09:14Z + Last Sample Start: 2024-07-17T12:15:06Z + Last Update Time: 2024-07-17T12:15:38Z + Memory Histogram: + Bucket Weights: + Index: 11 + Weight: 10000 + Reference Timestamp: 2024-07-17T12:15:00Z + Total Weight: 0.7827734162991002 + Ref: + Container Name: rabbitmq + Vpa Object Name: rabbitmq-autoscale + Total Samples Count: 6 + Version: v3 + Conditions: + Last Transition Time: 2024-07-17T12:10:37Z + Message: Successfully created RabbitMQOpsRequest demo/ppops-rabbitmq-autoscale-zzell6 + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest + Vpas: + Conditions: + Last Transition Time: 2024-07-17T12:09:37Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: rabbitmq + Lower Bound: + Cpu: 600m + Memory: 1.2Gi + Target: + Cpu: 600m + Memory: 1.2Gi + Uncapped Target: + Cpu: 500m + Memory: 2621445k + Upper Bound: + Cpu: 1 + Memory: 2Gi + Vpa Name: rabbitmq-autoscale +Events: +``` +So, the `RabbitMQautoscaler` resource is created successfully. + +you can see in the `Status.VPAs.Recommendation` section, that recommendation has been generated for our RabbitMQ. Our autoscaler operator continuously watches the recommendation generated and creates an `rabbitmqopsrequest` based on the recommendations, if the rabbitmq pods are needed to scaled up or down. + +Let's watch the `rabbitmqopsrequest` in the demo namespace to see if any `rabbitmqopsrequest` object is created. After some time you'll see that a `rabbitmqopsrequest` will be created based on the recommendation. + +```bash +$ watch kubectl get rabbitmqopsrequest -n demo +Every 2.0s: kubectl get rabbitmqopsrequest -n demo +NAME TYPE STATUS AGE +ppops-rabbitmq-autoscale-zzell6 VerticalScaling Progressing 1m48s +``` + +Let's wait for the ops request to become successful. 
+ +```bash +$ watch kubectl get rabbitmqopsrequest -n demo +Every 2.0s: kubectl get rabbitmqopsrequest -n demo +NAME TYPE STATUS AGE +ppops-rabbitmq-autoscale-zzell6 VerticalScaling Successful 3m40s +``` + +We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the RabbitMQ. + +```bash +$ kubectl describe rabbitmqopsrequest -n demo ppops-rabbitmq-autoscale-zzell6 +Name: ppops-rabbitmq-autoscale-zzell6 +Namespace: demo +Labels: app.kubernetes.io/component=connection-pooler + app.kubernetes.io/instance=rabbitmq-autoscale + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=rabbitmqs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: RabbitMQOpsRequest +Metadata: + Creation Timestamp: 2024-07-17T12:10:37Z + Generation: 1 + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: RabbitMQAutoscaler + Name: rabbitmq-autoscale-ops + UID: 3841c30b-3b19-4740-82f5-bf8e257ddc18 + Resource Version: 81200 + UID: 57f99d31-af3d-4157-aa61-0f509ec89bbd +Spec: + Apply: IfReady + Database Ref: + Name: rabbitmq-autoscale + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Cpu: 400m + Memory: 400Mi + Requests: + Cpu: 400m + Memory: 400Mi +Status: + Conditions: + Last Transition Time: 2024-07-17T12:10:37Z + Message: RabbitMQ ops-request has started to vertically scaling the RabbitMQ nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-07-17T12:10:40Z + Message: Successfully paused database + Observed Generation: 1 + Reason: DatabasePauseSucceeded + Status: True + Type: DatabasePauseSucceeded + Last Transition Time: 2024-07-17T12:10:40Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-07-17T12:11:25Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-07-17T12:10:45Z + Message: get pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Observed Generation: 1 + Status: True + Type: GetPod--rabbitmq-autoscale-0 + Last Transition Time: 2024-07-17T12:10:45Z + Message: evict pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Observed Generation: 1 + Status: True + Type: EvictPod--rabbitmq-autoscale-0 + Last Transition Time: 2024-07-17T12:11:20Z + Message: check pod running; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--rabbitmq-autoscale-0 + Last Transition Time: 2024-07-17T12:11:26Z + Message: Successfully completed the vertical scaling for rabbitmq + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 8m19s KubeDB Ops-manager Operator Start processing for rabbitmqOpsRequest: demo/ppops-rabbitmq-autoscale-zzell6 + Normal Starting 8m19s KubeDB Ops-manager Operator Pausing rabbitmq databse: demo/rabbitmq-autoscale + Normal Successful 8m19s KubeDB Ops-manager Operator Successfully paused rabbitmq database: demo/rabbitmq-autoscale for rabbitmqOpsRequest: ppops-rabbitmq-autoscale-zzell6 + Normal UpdatePetSets 8m16s KubeDB Ops-manager Operator 
Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 8m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Warning evict pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 8m11s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Warning check pod running; ConditionStatus:False; PodName:rabbitmq-autoscale-0 8m6s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:rabbitmq-autoscale-0 + Warning check pod running; ConditionStatus:True; PodName:rabbitmq-autoscale-0 7m36s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:rabbitmq-autoscale-0 + Normal RestartPods 7m31s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 7m31s KubeDB Ops-manager Operator Resuming rabbitmq database: demo/rabbitmq-autoscale + Normal Successful 7m30s KubeDB Ops-manager Operator Successfully resumed RabbitMQ database: demo/rabbitmq-autoscale for RabbitMQOpsRequest: ppops-rabbitmq-autoscale-zzell6 +``` + +Now, we are going to verify from the Pod, and the RabbitMQ yaml whether the resources of the RabbitMQ has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo rabbitmq-autoscale-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2Gi" + }, + "requests": { + "cpu": "600m", + "memory": "1.2Gi" + } +} + +$ kubectl get rabbitmq -n demo rabbitmq-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources' +{ + "limits": { + "cpu": "1", + "memory": "2Gi" + }, + "requests": { + "cpu": "600m", + "memory": "1.2Gi" + } +} +``` + + +The above output verifies that we have successfully auto-scaled the resources of the rabbitmq. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete rm -n demo rabbitmq-autoscale +kubectl delete rabbitmqautoscaler -n demo rabbitmq-autoscale-ops +``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/autoscaler/compute/overview.md b/docs/guides/rabbitmq/autoscaler/compute/overview.md index 5c47bce796..0dd8380804 100644 --- a/docs/guides/rabbitmq/autoscaler/compute/overview.md +++ b/docs/guides/rabbitmq/autoscaler/compute/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ Compute Autoscaling Overview menu: docs_{{ .version }}: - identifier: mg-auto-scaling-overview + identifier: rm-autoscaling-compute-overview name: Overview - parent: mg-compute-auto-scaling + parent: rm-autoscaling weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -19,18 +19,14 @@ This guide will give an overview on how KubeDB Autoscaler operator autoscales th ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQAutoscaler](/docs/guides/rabbitmq/concepts/autoscaler.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Compute Autoscaling Works The following diagram shows how KubeDB Autoscaler operator autoscales the resources of `RabbitMQ` database components. Open the image in a new tab to see the enlarged version. -
-  Compute Auto Scaling process of RabbitMQ -
Fig: Compute Auto Scaling process of RabbitMQ
-
The Auto Scaling process consists of the following steps: @@ -38,9 +34,9 @@ The Auto Scaling process consists of the following steps: 2. `KubeDB` Provisioner operator watches the `RabbitMQ` CRO. -3. When the operator finds a `RabbitMQ` CRO, it creates required number of `StatefulSets` and related necessary stuff like secrets, services, etc. +3. When the operator finds a `RabbitMQ` CRO, it creates the required number of `PetSets` and related necessary resources like secrets, services, etc. -4. Then, in order to set up autoscaling of the various components (ie. ReplicaSet, Shard, ConfigServer, Mongos, etc.) of the `RabbitMQ` database the user creates a `RabbitMQAutoscaler` CRO with desired configuration. +4. Then, in order to set up autoscaling of the `RabbitMQ` cluster, the user creates a `RabbitMQAutoscaler` CRO with the desired configuration. 5. `KubeDB` Autoscaler operator watches the `RabbitMQAutoscaler` CRO. diff --git a/docs/guides/rabbitmq/autoscaler/compute/replicaset.md b/docs/guides/rabbitmq/autoscaler/compute/replicaset.md deleted file mode 100644 index 4610c99039..0000000000 --- a/docs/guides/rabbitmq/autoscaler/compute/replicaset.md +++ /dev/null @@ -1,533 +0,0 @@ ---- -title: RabbitMQ Replicaset Autoscaling -menu: -  docs_{{ .version }}: -    identifier: mg-auto-scaling-replicaset -    name: Replicaset -    parent: mg-compute-auto-scaling -    weight: 20 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Autoscaling the Compute Resource of a RabbitMQ Replicaset Database - -This guide will show you how to use `KubeDB` to autoscale compute resources i.e. cpu and memory of a RabbitMQ replicaset database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). - -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- You should be familiar with the following `KubeDB` concepts: -  - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) -  - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) -  - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) -  - [Compute Resource Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/compute/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Autoscaling of Replicaset Database - -Here, we are going to deploy a `RabbitMQ` Replicaset using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ Replicaset - -In this section, we are going to deploy a RabbitMQ Replicaset database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD.
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-rs - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - replicas: 3 - storageType: Durable - storage: - resources: - requests: - storage: 1Gi - podTemplate: - spec: - resources: - requests: - cpu: "200m" - memory: "300Mi" - limits: - cpu: "200m" - memory: "300Mi" - terminationPolicy: WipeOut - -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-rs.yaml -RabbitMQ.kubedb.com/mg-rs created -``` - -Now, wait until `mg-rs` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-rs 4.4.26 Ready 2m53s -``` - -Let's check the Pod containers resources, - -```bash -$ kubectl get pod -n demo mg-rs-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -Let's check the RabbitMQ resources, -```bash -$ kubectl get RabbitMQ -n demo mg-rs -o json | jq '.spec.podTemplate.spec.resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -You can see from the above outputs that the resources are same as the one we have assigned while deploying the RabbitMQ. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up autoscaling for this database. - -### Compute Resource Autoscaling - -Here, we are going to set up compute resource autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up compute resource autoscaling for this replicaset database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as-rs - namespace: demo -spec: - databaseRef: - name: mg-rs - opsRequestOptions: - timeout: 3m - apply: IfReady - compute: - replicaSet: - trigger: "On" - podLifeTimeThreshold: 5m - resourceDiffPercentage: 20 - minAllowed: - cpu: 400m - memory: 400Mi - maxAllowed: - cpu: 1 - memory: 1Gi - controlledResources: ["cpu", "memory"] - containerControlledValues: "RequestsAndLimits" -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing compute resource scaling operation on `mg-rs` database. -- `spec.compute.replicaSet.trigger` specifies that compute autoscaling is enabled for this database. -- `spec.compute.replicaSet.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. -- `spec.compute.replicaset.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. - If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. -- `spec.compute.replicaSet.minAllowed` specifies the minimum allowed resources for the database. -- `spec.compute.replicaSet.maxAllowed` specifies the maximum allowed resources for the database. -- `spec.compute.replicaSet.controlledResources` specifies the resources that are controlled by the autoscaler. 
-- `spec.compute.replicaSet.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". -- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 3 fields. Know more about them here : [readinessCriteria](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria), [timeout](/docs/guides/RabbitMQ/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/RabbitMQ/concepts/opsrequest.md#specapply). - -If it was an `InMemory database`, we could also autoscaler the inMemory resources using RabbitMQ compute autoscaler, like below. - -#### Autoscale inMemory database -To autoscale inMemory databases, you need to specify the `spec.compute.replicaSet.inMemoryStorage` section. - -```yaml - ... - inMemoryStorage: - usageThresholdPercentage: 80 - scalingFactorPercentage: 30 - ... -``` -It has two fields inside it. -- `usageThresholdPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased. Default usage threshold is 70%. -- `scalingFactorPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased by this given scaling percentage. Default scaling percentage is 50%. - -> Note: To inform you, We use `db.serverStatus().inMemory.cache["bytes currently in the cache"]` & `db.serverStatus().inMemory.cache["maximum bytes configured"]` to calculate the used & maximum inMemory storage respectively. - -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-as-rs.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as-rs created -``` - -#### Verify Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as-rs 102s - -$ kubectl describe RabbitMQautoscaler mg-as-rs -n demo -Name: mg-as-rs -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2022-10-27T06:56:34Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:compute: - .: - f:replicaSet: - .: - f:containerControlledValues: - f:controlledResources: - f:maxAllowed: - .: - f:cpu: - f:memory: - f:minAllowed: - .: - f:cpu: - f:memory: - f:podLifeTimeThreshold: - f:resourceDiffPercentage: - f:trigger: - f:databaseRef: - f:opsRequestOptions: - .: - f:apply: - f:timeout: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-10-27T06:56:34Z - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:checkpoints: - f:conditions: - f:vpas: - Manager: kubedb-autoscaler - Operation: Update - Subresource: status - Time: 2022-10-27T07:01:05Z - Resource Version: 640314 - UID: ab03414a-67a2-4da4-8960-6e67ae56b503 -Spec: - Compute: - Replica Set: - Container Controlled Values: RequestsAndLimits - Controlled Resources: - cpu - memory - Max Allowed: - Cpu: 1 - Memory: 1Gi - Min Allowed: - Cpu: 400m - Memory: 400Mi - Pod Life Time Threshold: 5m0s - Resource Diff Percentage: 20 - Trigger: On - Database Ref: - Name: mg-rs - Ops Request Options: - Apply: IfReady - Timeout: 3m0s -Status: - 
Checkpoints: - Cpu Histogram: - Bucket Weights: - Index: 2 - Weight: 10000 - Index: 3 - Weight: 5000 - Reference Timestamp: 2022-10-27T00:00:00Z - Total Weight: 0.3673624107285783 - First Sample Start: 2022-10-27T07:00:42Z - Last Sample Start: 2022-10-27T07:00:55Z - Last Update Time: 2022-10-27T07:01:00Z - Memory Histogram: - Reference Timestamp: 2022-10-28T00:00:00Z - Ref: - Container Name: RabbitMQ - Vpa Object Name: mg-rs - Total Samples Count: 3 - Version: v3 - Cpu Histogram: - Bucket Weights: - Index: 0 - Weight: 10000 - Reference Timestamp: 2022-10-27T00:00:00Z - Total Weight: 0.3673624107285783 - First Sample Start: 2022-10-27T07:00:42Z - Last Sample Start: 2022-10-27T07:00:55Z - Last Update Time: 2022-10-27T07:01:00Z - Memory Histogram: - Reference Timestamp: 2022-10-28T00:00:00Z - Ref: - Container Name: replication-mode-detector - Vpa Object Name: mg-rs - Total Samples Count: 3 - Version: v3 - Conditions: - Last Transition Time: 2022-10-27T07:01:05Z - Message: Successfully created RabbitMQOpsRequest demo/mops-mg-rs-cxhsy1 - Observed Generation: 1 - Reason: CreateOpsRequest - Status: True - Type: CreateOpsRequest - Vpas: - Conditions: - Last Transition Time: 2022-10-27T07:01:00Z - Status: True - Type: RecommendationProvided - Recommendation: - Container Recommendations: - Container Name: RabbitMQ - Lower Bound: - Cpu: 400m - Memory: 400Mi - Target: - Cpu: 400m - Memory: 400Mi - Uncapped Target: - Cpu: 49m - Memory: 262144k - Upper Bound: - Cpu: 1 - Memory: 1Gi - Vpa Name: mg-rs -Events: -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -you can see in the `Status.VPAs.Recommendation` section, that recommendation has been generated for our database. Our autoscaler operator continuously watches the recommendation generated and creates an `RabbitMQopsrequest` based on the recommendations, if the database pods are needed to scaled up or down. - -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` will be created based on the recommendation. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-rs-cxhsy1 VerticalScaling Progressing 10s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-rs-cxhsy1 VerticalScaling Successful 68s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-mg-rs-cxhsy1 -Name: mops-mg-rs-cxhsy1 -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2022-10-27T07:01:05Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:ownerReferences: - .: - k:{"uid":"ab03414a-67a2-4da4-8960-6e67ae56b503"}: - f:spec: - .: - f:apply: - f:databaseRef: - f:timeout: - f:type: - f:verticalScaling: - .: - f:replicaSet: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - Manager: kubedb-autoscaler - Operation: Update - Time: 2022-10-27T07:01:05Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-ops-manager - Operation: Update - Subresource: status - Time: 2022-10-27T07:02:31Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as-rs - UID: ab03414a-67a2-4da4-8960-6e67ae56b503 - Resource Version: 640598 - UID: f7c6db00-dd0e-4850-8bad-5f0855ce3850 -Spec: - Apply: IfReady - Database Ref: - Name: mg-rs - Timeout: 3m0s - Type: VerticalScaling - Vertical Scaling: - Replica Set: - Limits: - Cpu: 400m - Memory: 400Mi - Requests: - Cpu: 400m - Memory: 400Mi -Status: - Conditions: - Last Transition Time: 2022-10-27T07:01:05Z - Message: RabbitMQ ops request is vertically scaling database - Observed Generation: 1 - Reason: VerticalScaling - Status: True - Type: VerticalScaling - Last Transition Time: 2022-10-27T07:02:30Z - Message: Successfully Vertically Scaled Replicaset Resources - Observed Generation: 1 - Reason: UpdateReplicaSetResources - Status: True - Type: UpdateReplicaSetResources - Last Transition Time: 2022-10-27T07:02:31Z - Message: Successfully Vertically Scaled Database - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 4m9s KubeDB Ops-manager Operator Pausing RabbitMQ demo/mg-rs - Normal PauseDatabase 4m9s KubeDB Ops-manager Operator Successfully paused RabbitMQ demo/mg-rs - Normal Starting 4m9s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-rs - Normal UpdateReplicaSetResources 4m9s KubeDB Ops-manager Operator Successfully updated replicaset Resources - Normal Starting 4m9s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-rs - Normal UpdateReplicaSetResources 4m9s KubeDB Ops-manager Operator Successfully updated replicaset Resources - Normal UpdateReplicaSetResources 2m44s KubeDB Ops-manager Operator Successfully Vertically Scaled Replicaset Resources - Normal ResumeDatabase 2m43s KubeDB Ops-manager Operator Resuming RabbitMQ demo/mg-rs - Normal ResumeDatabase 2m43s KubeDB Ops-manager Operator Successfully resumed RabbitMQ demo/mg-rs - Normal Successful 2m43s KubeDB Ops-manager Operator Successfully Vertically Scaled Database - Normal UpdateReplicaSetResources 2m43s KubeDB Ops-manager Operator Successfully Vertically Scaled Replicaset Resources - -``` - -Now, we are going to verify from the Pod, and the RabbitMQ yaml whether the resources of the replicaset database has updated to meet up the desired state, Let's check, - -```bash -$ kubectl get pod -n demo mg-rs-0 -o json | jq 
'.spec.containers[].resources' -{ - "limits": { - "cpu": "400m", - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} - -$ kubectl get RabbitMQ -n demo mg-rs -o json | jq '.spec.podTemplate.spec.resources' -{ - "limits": { - "cpu": "400m", - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} -``` - - -The above output verifies that we have successfully auto scaled the resources of the RabbitMQ replicaset database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-rs -kubectl delete RabbitMQautoscaler -n demo mg-as-rs -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/autoscaler/compute/sharding.md b/docs/guides/rabbitmq/autoscaler/compute/sharding.md deleted file mode 100644 index 7772f435b1..0000000000 --- a/docs/guides/rabbitmq/autoscaler/compute/sharding.md +++ /dev/null @@ -1,571 +0,0 @@ ---- -title: RabbitMQ Shard Autoscaling -menu: - docs_{{ .version }}: - identifier: mg-auto-scaling-shard - name: Sharding - parent: mg-compute-auto-scaling - weight: 25 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Autoscaling the Compute Resource of a RabbitMQ Sharded Database - -This guide will show you how to use `KubeDB` to autoscale compute resources i.e. cpu and memory of a RabbitMQ sharded database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). - -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Compute Resource Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/compute/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Autoscaling of Sharded Database - -Here, we are going to deploy a `RabbitMQ` sharded database using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ Sharded Database - -In this section, we are going to deploy a RabbitMQ sharded database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sh - namespace: demo -spec: - version: "4.4.26" - storageType: Durable - shardTopology: - configServer: - storage: - resources: - requests: - storage: 1Gi - replicas: 3 - podTemplate: - spec: - resources: - requests: - cpu: "200m" - memory: "300Mi" - mongos: - replicas: 2 - podTemplate: - spec: - resources: - requests: - cpu: "200m" - memory: "300Mi" - shard: - storage: - resources: - requests: - storage: 1Gi - replicas: 3 - shards: 2 - podTemplate: - spec: - resources: - requests: - cpu: "200m" - memory: "300Mi" - terminationPolicy: WipeOut -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-sh.yaml -RabbitMQ.kubedb.com/mg-sh created -``` - -Now, wait until `mg-sh` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sh 4.4.26 Ready 3m57s -``` - -Let's check a shard Pod containers resources, - -```bash -$ kubectl get pod -n demo mg-sh-shard0-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -Let's check the RabbitMQ resources, -```bash -$ kubectl get RabbitMQ -n demo mg-sh -o json | jq '.spec.shardTopology.shard.podTemplate.spec.resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -You can see from the above outputs that the resources are same as the one we have assigned while deploying the RabbitMQ. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up autoscaling for this database. - -### Compute Resource Autoscaling - -Here, we are going to set up compute resource autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up compute resource autoscaling for the shard pod of the database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as-sh - namespace: demo -spec: - databaseRef: - name: mg-sh - opsRequestOptions: - timeout: 3m - apply: IfReady - compute: - shard: - trigger: "On" - podLifeTimeThreshold: 5m - resourceDiffPercentage: 20 - minAllowed: - cpu: 400m - memory: 400Mi - maxAllowed: - cpu: 1 - memory: 1Gi - controlledResources: ["cpu", "memory"] - containerControlledValues: "RequestsAndLimits" -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing compute resource scaling operation on `mg-sh` database. -- `spec.compute.shard.trigger` specifies that compute autoscaling is enabled for the shard pods of this database. -- `spec.compute.shard.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. -- `spec.compute.replicaset.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. - If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. -- `spec.compute.shard.minAllowed` specifies the minimum allowed resources for the database. 
-- `spec.compute.shard.maxAllowed` specifies the maximum allowed resources for the database. -- `spec.compute.shard.controlledResources` specifies the resources that are controlled by the autoscaler. -- `spec.compute.shard.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". -- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 3 fields. Know more about them here : [readinessCriteria](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria), [timeout](/docs/guides/RabbitMQ/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/RabbitMQ/concepts/opsrequest.md#specapply). -> Note: In this demo we are only setting up the autoscaling for the shard pods, that's why we only specified the shard section of the autoscaler. You can enable autoscaling for mongos and configServer pods in the same yaml, by specifying the `spec.compute.mongos` and `spec.compute.configServer` section, similar to the `spec.comput.shard` section we have configured in this demo. - -If it was an `InMemory database`, we could also autoscaler the inMemory resources using RabbitMQ compute autoscaler, like below. - -#### Autoscale inMemory database -To autoscale inMemory databases, you need to specify the `spec.compute.shard.inMemoryStorage` section. - -```yaml - ... - inMemoryStorage: - usageThresholdPercentage: 80 - scalingFactorPercentage: 30 - ... -``` -It has two fields inside it. -- `usageThresholdPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased. Default usage threshold is 70%. -- `scalingFactorPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased by this given scaling percentage. Default scaling percentage is 50%. - -> Note: To inform you, We use `db.serverStatus().inMemory.cache["bytes currently in the cache"]` & `db.serverStatus().inMemory.cache["maximum bytes configured"]` to calculate the used & maximum inMemory storage respectively. 
- - -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-as-sh.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as-sh created -``` - -#### Verify Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as-sh 102s - -$ kubectl describe RabbitMQautoscaler mg-as-sh -n demo -Name: mg-as-sh -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2022-10-27T09:46:48Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:compute: - .: - f:shard: - .: - f:containerControlledValues: - f:controlledResources: - f:maxAllowed: - .: - f:cpu: - f:memory: - f:minAllowed: - .: - f:cpu: - f:memory: - f:podLifeTimeThreshold: - f:resourceDiffPercentage: - f:trigger: - f:databaseRef: - f:opsRequestOptions: - .: - f:apply: - f:timeout: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-10-27T09:46:48Z - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:checkpoints: - f:conditions: - f:vpas: - Manager: kubedb-autoscaler - Operation: Update - Subresource: status - Time: 2022-10-27T09:47:08Z - Resource Version: 654853 - UID: 36878e8e-f100-409e-aa76-e6f46569df76 -Spec: - Compute: - Shard: - Container Controlled Values: RequestsAndLimits - Controlled Resources: - cpu - memory - Max Allowed: - Cpu: 1 - Memory: 1Gi - Min Allowed: - Cpu: 400m - Memory: 400Mi - Pod Life Time Threshold: 5m0s - Resource Diff Percentage: 20 - Trigger: On - Database Ref: - Name: mg-sh - Ops Request Options: - Apply: IfReady - Timeout: 3m0s -Status: - Checkpoints: - Cpu Histogram: - Bucket Weights: - Index: 1 - Weight: 5001 - Index: 2 - Weight: 10000 - Reference Timestamp: 2022-10-27T00:00:00Z - Total Weight: 0.397915611757652 - First Sample Start: 2022-10-27T09:46:43Z - Last Sample Start: 2022-10-27T09:46:57Z - Last Update Time: 2022-10-27T09:47:06Z - Memory Histogram: - Reference Timestamp: 2022-10-28T00:00:00Z - Ref: - Container Name: RabbitMQ - Vpa Object Name: mg-sh-shard0 - Total Samples Count: 3 - Version: v3 - Cpu Histogram: - Bucket Weights: - Index: 1 - Weight: 10000 - Reference Timestamp: 2022-10-27T00:00:00Z - Total Weight: 0.39793263724156597 - First Sample Start: 2022-10-27T09:46:50Z - Last Sample Start: 2022-10-27T09:46:56Z - Last Update Time: 2022-10-27T09:47:06Z - Memory Histogram: - Reference Timestamp: 2022-10-28T00:00:00Z - Ref: - Container Name: RabbitMQ - Vpa Object Name: mg-sh-shard1 - Total Samples Count: 3 - Version: v3 - Conditions: - Last Transition Time: 2022-10-27T09:47:08Z - Message: Successfully created RabbitMQOpsRequest demo/mops-vpa-mg-sh-shard-ml75qi - Observed Generation: 1 - Reason: CreateOpsRequest - Status: True - Type: CreateOpsRequest - Vpas: - Conditions: - Last Transition Time: 2022-10-27T09:47:06Z - Status: True - Type: RecommendationProvided - Recommendation: - Container Recommendations: - Container Name: RabbitMQ - Lower Bound: - Cpu: 400m - Memory: 400Mi - Target: - Cpu: 400m - Memory: 400Mi - Uncapped Target: - Cpu: 35m - Memory: 262144k - Upper Bound: - Cpu: 1 - Memory: 1Gi - Vpa Name: 
mg-sh-shard0 - Conditions: - Last Transition Time: 2022-10-27T09:47:06Z - Status: True - Type: RecommendationProvided - Recommendation: - Container Recommendations: - Container Name: RabbitMQ - Lower Bound: - Cpu: 400m - Memory: 400Mi - Target: - Cpu: 400m - Memory: 400Mi - Uncapped Target: - Cpu: 25m - Memory: 262144k - Upper Bound: - Cpu: 1 - Memory: 1Gi - Vpa Name: mg-sh-shard1 -Events: - -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -you can see in the `Status.VPAs.Recommendation` section, that recommendation has been generated for our database. Our autoscaler operator continuously watches the recommendation generated and creates an `RabbitMQopsrequest` based on the recommendations, if the database pods are needed to scaled up or down. - -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` will be created based on the recommendation. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-vpa-mg-sh-shard-ml75qi VerticalScaling Progressing 19s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-vpa-mg-sh-shard-ml75qi VerticalScaling Successful 5m8s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-vpa-mg-sh-shard-ml75qi -Name: mops-vpa-mg-sh-shard-ml75qi -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2022-10-27T09:47:08Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:ownerReferences: - .: - k:{"uid":"36878e8e-f100-409e-aa76-e6f46569df76"}: - f:spec: - .: - f:apply: - f:databaseRef: - f:timeout: - f:type: - f:verticalScaling: - .: - f:shard: - .: - f:limits: - .: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - Manager: kubedb-autoscaler - Operation: Update - Time: 2022-10-27T09:47:08Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-ops-manager - Operation: Update - Subresource: status - Time: 2022-10-27T09:49:49Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as-sh - UID: 36878e8e-f100-409e-aa76-e6f46569df76 - Resource Version: 655347 - UID: c44fbd53-40f9-42ca-9b4c-823d8e998d01 -Spec: - Apply: IfReady - Database Ref: - Name: mg-sh - Timeout: 3m0s - Type: VerticalScaling - Vertical Scaling: - Shard: - Limits: - Memory: 400Mi - Requests: - Cpu: 400m - Memory: 400Mi -Status: - Conditions: - Last Transition Time: 2022-10-27T09:47:08Z - Message: RabbitMQ ops request is vertically scaling database - Observed Generation: 1 - Reason: VerticalScaling - Status: True - Type: VerticalScaling - Last Transition Time: 2022-10-27T09:49:49Z - Message: Successfully Vertically Scaled Shard Resources - Observed Generation: 1 - Reason: UpdateShardResources - Status: True - Type: UpdateShardResources - Last Transition Time: 
2022-10-27T09:49:49Z - Message: Successfully Vertically Scaled Database - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 3m27s KubeDB Ops-manager Operator Pausing RabbitMQ demo/mg-sh - Normal PauseDatabase 3m27s KubeDB Ops-manager Operator Successfully paused RabbitMQ demo/mg-sh - Normal Starting 3m27s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sh-shard0 - Normal Starting 3m27s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sh-shard1 - Normal UpdateShardResources 3m27s KubeDB Ops-manager Operator Successfully updated Shard Resources - Normal Starting 3m27s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sh-shard0 - Normal Starting 3m27s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sh-shard1 - Normal UpdateShardResources 3m27s KubeDB Ops-manager Operator Successfully updated Shard Resources - Normal UpdateShardResources 46s KubeDB Ops-manager Operator Successfully Vertically Scaled Shard Resources - Normal ResumeDatabase 46s KubeDB Ops-manager Operator Resuming RabbitMQ demo/mg-sh - Normal ResumeDatabase 46s KubeDB Ops-manager Operator Successfully resumed RabbitMQ demo/mg-sh - Normal Successful 46s KubeDB Ops-manager Operator Successfully Vertically Scaled Database -``` - -Now, we are going to verify from the Pod, and the RabbitMQ yaml whether the resources of the shard pod of the database has updated to meet up the desired state, Let's check, - -```bash -$ kubectl get pod -n demo mg-sh-shard0-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} - - -$ kubectl get RabbitMQ -n demo mg-sh -o json | jq '.spec.shardTopology.shard.podTemplate.spec.resources' -{ - "limits": { - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} - -``` - - -The above output verifies that we have successfully auto scaled the resources of the RabbitMQ sharded database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-sh -kubectl delete RabbitMQautoscaler -n demo mg-as-sh -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/autoscaler/compute/standalone.md b/docs/guides/rabbitmq/autoscaler/compute/standalone.md deleted file mode 100644 index d72799b003..0000000000 --- a/docs/guides/rabbitmq/autoscaler/compute/standalone.md +++ /dev/null @@ -1,511 +0,0 @@ ---- -title: RabbitMQ Standalone Autoscaling -menu: - docs_{{ .version }}: - identifier: mg-auto-scaling-standalone - name: Standalone - parent: mg-compute-auto-scaling - weight: 15 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Autoscaling the Compute Resource of a RabbitMQ Standalone Database - -This guide will show you how to use `KubeDB` to autoscale compute resources i.e. cpu and memory of a RabbitMQ standalone database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). 
- -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Compute Resource Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/compute/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Autoscaling of Standalone Database - -Here, we are going to deploy a `RabbitMQ` standalone using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ standalone - -In this section, we are going to deploy a RabbitMQ standalone database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-standalone - namespace: demo -spec: - version: "4.4.26" - storageType: Durable - storage: - resources: - requests: - storage: 1Gi - podTemplate: - spec: - resources: - requests: - cpu: "200m" - memory: "300Mi" - limits: - cpu: "200m" - memory: "300Mi" - terminationPolicy: WipeOut -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-standalone.yaml -RabbitMQ.kubedb.com/mg-standalone created -``` - -Now, wait until `mg-standalone` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-standalone 4.4.26 Ready 2m53s -``` - -Let's check the Pod containers resources, - -```bash -$ kubectl get pod -n demo mg-standalone-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -Let's check the RabbitMQ resources, -```bash -$ kubectl get RabbitMQ -n demo mg-standalone -o json | jq '.spec.podTemplate.spec.resources' -{ - "limits": { - "cpu": "200m", - "memory": "300Mi" - }, - "requests": { - "cpu": "200m", - "memory": "300Mi" - } -} -``` - -You can see from the above outputs that the resources are same as the one we have assigned while deploying the RabbitMQ. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up autoscaling for this database. - -### Compute Resource Autoscaling - -Here, we are going to set up compute (cpu and memory) autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up compute resource autoscaling for this standalone database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. 
Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as - namespace: demo -spec: - databaseRef: - name: mg-standalone - opsRequestOptions: - timeout: 3m - apply: IfReady - compute: - standalone: - trigger: "On" - podLifeTimeThreshold: 5m - resourceDiffPercentage: 20 - minAllowed: - cpu: 400m - memory: 400Mi - maxAllowed: - cpu: 1 - memory: 1Gi - controlledResources: ["cpu", "memory"] - containerControlledValues: "RequestsAndLimits" -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing compute resource autoscaling on `mg-standalone` database. -- `spec.compute.standalone.trigger` specifies that compute resource autoscaling is enabled for this database. -- `spec.compute.standalone.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. -- `spec.compute.replicaset.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. - If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. -- `spec.compute.standalone.minAllowed` specifies the minimum allowed resources for the database. -- `spec.compute.standalone.maxAllowed` specifies the maximum allowed resources for the database. -- `spec.compute.standalone.controlledResources` specifies the resources that are controlled by the autoscaler. -- `spec.compute.standalone.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". -- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 3 fields. Know more about them here : [readinessCriteria](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria), [timeout](/docs/guides/RabbitMQ/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/RabbitMQ/concepts/opsrequest.md#specapply). - -If it was an `InMemory database`, we could also autoscaler the inMemory resources using RabbitMQ compute autoscaler, like below. - -#### Autoscale inMemory database -To autoscale inMemory databases, you need to specify the `spec.compute.standalone.inMemoryStorage` section. - -```yaml - ... - inMemoryStorage: - usageThresholdPercentage: 80 - scalingFactorPercentage: 30 - ... -``` -It has two fields inside it. -- `usageThresholdPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased. Default usage threshold is 70%. -- `scalingFactorPercentage`. If db uses more than usageThresholdPercentage of the total memory, memoryStorage should be increased by this given scaling percentage. Default scaling percentage is 50%. - -> Note: To inform you, We use `db.serverStatus().inMemory.cache["bytes currently in the cache"]` & `db.serverStatus().inMemory.cache["maximum bytes configured"]` to calculate the used & maximum inMemory storage respectively. 
- - -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/compute/mg-as-standalone.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as created -``` - -#### Verify Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as 102s - -$ kubectl describe RabbitMQautoscaler mg-as -n demo -Name: mg-as -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2022-10-27T09:54:35Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:compute: - .: - f:standalone: - .: - f:containerControlledValues: - f:controlledResources: - f:maxAllowed: - .: - f:cpu: - f:memory: - f:minAllowed: - .: - f:cpu: - f:memory: - f:podLifeTimeThreshold: - f:resourceDiffPercentage: - f:trigger: - f:databaseRef: - f:opsRequestOptions: - .: - f:apply: - f:timeout: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-10-27T09:54:35Z - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:checkpoints: - f:conditions: - f:vpas: - Manager: kubedb-autoscaler - Operation: Update - Subresource: status - Time: 2022-10-27T09:55:08Z - Resource Version: 656164 - UID: 439c148f-7c22-456f-a4b4-758cead29932 -Spec: - Compute: - Standalone: - Container Controlled Values: RequestsAndLimits - Controlled Resources: - cpu - memory - Max Allowed: - Cpu: 1 - Memory: 1Gi - Min Allowed: - Cpu: 400m - Memory: 400Mi - Pod Life Time Threshold: 5m0s - Resource Diff Percentage: 20 - Trigger: On - Database Ref: - Name: mg-standalone - Ops Request Options: - Apply: IfReady - Timeout: 3m0s -Status: - Checkpoints: - Cpu Histogram: - Bucket Weights: - Index: 6 - Weight: 10000 - Reference Timestamp: 2022-10-27T00:00:00Z - Total Weight: 0.133158834498727 - First Sample Start: 2022-10-27T09:54:56Z - Last Sample Start: 2022-10-27T09:54:56Z - Last Update Time: 2022-10-27T09:55:07Z - Memory Histogram: - Reference Timestamp: 2022-10-28T00:00:00Z - Ref: - Container Name: RabbitMQ - Vpa Object Name: mg-standalone - Total Samples Count: 1 - Version: v3 - Conditions: - Last Transition Time: 2022-10-27T09:55:08Z - Message: Successfully created RabbitMQOpsRequest demo/mops-mg-standalone-57huq2 - Observed Generation: 1 - Reason: CreateOpsRequest - Status: True - Type: CreateOpsRequest - Vpas: - Conditions: - Last Transition Time: 2022-10-27T09:55:07Z - Status: True - Type: RecommendationProvided - Recommendation: - Container Recommendations: - Container Name: RabbitMQ - Lower Bound: - Cpu: 400m - Memory: 400Mi - Target: - Cpu: 400m - Memory: 400Mi - Uncapped Target: - Cpu: 93m - Memory: 262144k - Upper Bound: - Cpu: 1 - Memory: 1Gi - Vpa Name: mg-standalone -Events: - -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -you can see in the `Status.VPAs.Recommendation` section, that recommendation has been generated for our database. Our autoscaler operator continuously watches the recommendation generated and creates an `RabbitMQopsrequest` based on the recommendations, if the database pods are needed to scaled up or down. 
- -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` will be created based on the recommendation. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-standalone-57huq2 VerticalScaling Progressing 10s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-standalone-57huq2 VerticalScaling Successful 68s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-mg-standalone-57huq2 -Name: mops-mg-standalone-57huq2 -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2022-10-27T09:55:08Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:ownerReferences: - .: - k:{"uid":"439c148f-7c22-456f-a4b4-758cead29932"}: - f:spec: - .: - f:apply: - f:databaseRef: - f:timeout: - f:type: - f:verticalScaling: - .: - f:standalone: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - Manager: kubedb-autoscaler - Operation: Update - Time: 2022-10-27T09:55:08Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-ops-manager - Operation: Update - Subresource: status - Time: 2022-10-27T09:55:33Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as - UID: 439c148f-7c22-456f-a4b4-758cead29932 - Resource Version: 656279 - UID: 29908a23-7cba-4f81-b787-3f9d226993f8 -Spec: - Apply: IfReady - Database Ref: - Name: mg-standalone - Timeout: 3m0s - Type: VerticalScaling - Vertical Scaling: - Standalone: - Limits: - Cpu: 400m - Memory: 400Mi - Requests: - Cpu: 400m - Memory: 400Mi -Status: - Conditions: - Last Transition Time: 2022-10-27T09:55:08Z - Message: RabbitMQ ops request is vertically scaling database - Observed Generation: 1 - Reason: VerticalScaling - Status: True - Type: VerticalScaling - Last Transition Time: 2022-10-27T09:55:33Z - Message: Successfully Vertically Scaled Standalone Resources - Observed Generation: 1 - Reason: UpdateStandaloneResources - Status: True - Type: UpdateStandaloneResources - Last Transition Time: 2022-10-27T09:55:33Z - Message: Successfully Vertically Scaled Database - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 2m40s KubeDB Ops-manager Operator Pausing RabbitMQ demo/mg-standalone - Normal PauseDatabase 2m40s KubeDB Ops-manager Operator Successfully paused RabbitMQ demo/mg-standalone - Normal Starting 2m40s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-standalone - Normal UpdateStandaloneResources 2m40s KubeDB Ops-manager Operator Successfully updated standalone Resources - Normal Starting 2m40s KubeDB Ops-manager Operator Updating Resources of 
StatefulSet: mg-standalone - Normal UpdateStandaloneResources 2m40s KubeDB Ops-manager Operator Successfully updated standalone Resources - Normal UpdateStandaloneResources 2m15s KubeDB Ops-manager Operator Successfully Vertically Scaled Standalone Resources - Normal ResumeDatabase 2m15s KubeDB Ops-manager Operator Resuming RabbitMQ demo/mg-standalone - Normal ResumeDatabase 2m15s KubeDB Ops-manager Operator Successfully resumed RabbitMQ demo/mg-standalone - Normal Successful 2m15s KubeDB Ops-manager Operator Successfully Vertically Scaled Database -``` - -Now, we are going to verify from the Pod, and the RabbitMQ yaml whether the resources of the standalone database has updated to meet up the desired state, Let's check, - -```bash -$ kubectl get pod -n demo mg-standalone-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "400m", - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} - -$ kubectl get RabbitMQ -n demo mg-standalone -o json | jq '.spec.podTemplate.spec.resources' -{ - "limits": { - "cpu": "400m", - "memory": "400Mi" - }, - "requests": { - "cpu": "400m", - "memory": "400Mi" - } -} -``` - - -The above output verifies that we have successfully auto scaled the resources of the RabbitMQ standalone database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-standalone -kubectl delete RabbitMQautoscaler -n demo mg-as -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/autoscaler/storage/_index.md b/docs/guides/rabbitmq/autoscaler/storage/_index.md index 1e28090c06..f0f1d305ad 100644 --- a/docs/guides/rabbitmq/autoscaler/storage/_index.md +++ b/docs/guides/rabbitmq/autoscaler/storage/_index.md @@ -2,9 +2,9 @@ title: Storage Autoscaling menu: docs_{{ .version }}: - identifier: mg-storage-auto-scaling + identifier: rm-storage-autoscaling name: Storage Autoscaling - parent: mg-auto-scaling + parent: rm-autoscaling weight: 46 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/autoscaler/storage/overview.md b/docs/guides/rabbitmq/autoscaler/storage/overview.md index 60755c9bd8..afdfe3dd5c 100644 --- a/docs/guides/rabbitmq/autoscaler/storage/overview.md +++ b/docs/guides/rabbitmq/autoscaler/storage/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ Storage Autoscaling Overview menu: docs_{{ .version }}: - identifier: mg-storage-auto-scaling-overview + identifier: rm-autoscaling-storage-overview name: Overview - parent: mg-storage-auto-scaling + parent: rm-autoscaling weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -19,19 +19,14 @@ This guide will give an overview on how KubeDB Autoscaler operator autoscales th ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQAutoscaler](/docs/guides/rabbitmq/concepts/autoscaler.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Storage Autoscaling Works The following diagram shows how KubeDB Autoscaler operator autoscales the resources of `RabbitMQ` database components. Open the image in a new tab to see the enlarged version. -
-  Storage Auto Scaling process of RabbitMQ -
Fig: Storage Auto Scaling process of RabbitMQ
-
- The Auto Scaling process consists of the following steps: diff --git a/docs/guides/rabbitmq/autoscaler/storage/replicaset.md b/docs/guides/rabbitmq/autoscaler/storage/replicaset.md deleted file mode 100644 index 63d89a7304..0000000000 --- a/docs/guides/rabbitmq/autoscaler/storage/replicaset.md +++ /dev/null @@ -1,387 +0,0 @@ ---- -title: RabbitMQ Replicaset Autoscaling -menu: - docs_{{ .version }}: - identifier: mg-storage-auto-scaling-replicaset - name: ReplicaSet - parent: mg-storage-auto-scaling - weight: 20 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Storage Autoscaling of a RabbitMQ Replicaset Database - -This guide will show you how to use `KubeDB` to autoscale the storage of a RabbitMQ Replicaset database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). - -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) - -- You must have a `StorageClass` that supports volume expansion. - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Storage Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/storage/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Storage Autoscaling of ReplicaSet Database - -At first verify that your cluster has a storage class, that supports volume expansion. Let's check, - -```bash -$ kubectl get storageclass -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 9h -topolvm-provisioner topolvm.cybozu.com Delete WaitForFirstConsumer true 9h -``` - -We can see from the output the `topolvm-provisioner` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. You can install topolvm from [here](https://github.com/topolvm/topolvm) - -Now, we are going to deploy a `RabbitMQ` replicaset using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ replicaset - -In this section, we are going to deploy a RabbitMQ replicaset database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-rs - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - replicas: 3 - storageType: Durable - storage: - storageClassName: topolvm-provisioner - resources: - requests: - storage: 1Gi - terminationPolicy: WipeOut -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-rs.yaml -RabbitMQ.kubedb.com/mg-rs created -``` - -Now, wait until `mg-rs` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-rs 4.4.26 Ready 2m53s -``` - -Let's check volume size from statefulset, and from the persistent volume, - -```bash -$ kubectl get sts -n demo mg-rs -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-b16daa50-83fc-4d25-b553-4a25f13166d5 1Gi RWO Delete Bound demo/datadir-mg-rs-0 topolvm-provisioner 2m12s -pvc-d4616bef-359d-4b73-ab9f-38c24aaaec8c 1Gi RWO Delete Bound demo/datadir-mg-rs-1 topolvm-provisioner 61s -pvc-ead21204-3dc7-453c-8121-d2fe48b1c3e2 1Gi RWO Delete Bound demo/datadir-mg-rs-2 topolvm-provisioner 18s -``` - -You can see the statefulset has 1GB storage, and the capacity of all the persistent volume is also 1GB. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up storage autoscaling for this database. - -### Storage Autoscaling - -Here, we are going to set up storage autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up vertical autoscaling for this replicaset database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as-rs - namespace: demo -spec: - databaseRef: - name: mg-rs - storage: - replicaSet: - expansionMode: "Online" - trigger: "On" - usageThreshold: 60 - scalingThreshold: 50 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mg-rs` database. -- `spec.storage.replicaSet.trigger` specifies that storage autoscaling is enabled for this database. -- `spec.storage.replicaSet.usageThreshold` specifies storage usage threshold, if storage usage exceeds `60%` then storage autoscaling will be triggered. -- `spec.storage.replicaSet.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `50%` of the current amount. -- It has another field `spec.storage.replicaSet.expansionMode` to set the opsRequest volumeExpansionMode, which support two values: `Online` & `Offline`. Default value is `Online`. 
- -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-as-rs.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as-rs created -``` - -#### Storage Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as-rs 20s - -$ kubectl describe RabbitMQautoscaler mg-as-rs -n demo -Name: mg-as-rs -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2021-03-08T14:11:46Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:storage: - .: - f:replicaSet: - .: - f:scalingThreshold: - f:trigger: - f:usageThreshold: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-08T14:11:46Z - Resource Version: 152149 - Self Link: /apis/autoscaling.kubedb.com/v1alpha1/namespaces/demo/RabbitMQautoscalers/mg-as-rs - UID: a0dab64d-e7c4-4819-8ffe-360c70231577 -Spec: - Database Ref: - Name: mg-rs - Storage: - Replica Set: - Scaling Threshold: 50 - Trigger: On - Usage Threshold: 60 -Events: -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. - -Let's exec into the database pod and fill the database volume using the following commands: - -```bash -$ kubectl exec -it -n demo mg-rs-0 -- bash -root@mg-rs-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/760cb655-91fe-4497-ab4a-a771aa53ece4 1014M 335M 680M 33% /data/db -root@mg-rs-0:/# dd if=/dev/zero of=/data/db/file.img bs=500M count=1 -1+0 records in -1+0 records out -524288000 bytes (524 MB, 500 MiB) copied, 0.482378 s, 1.1 GB/s -root@mg-rs-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/760cb655-91fe-4497-ab4a-a771aa53ece4 1014M 835M 180M 83% /data/db -``` - -So, from the above output we can see that the storage usage is 83%, which exceeded the `usageThreshold` 60%. - -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-rs-mft11m VolumeExpansion Progressing 10s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-rs-mft11m VolumeExpansion Successful 97s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-mg-rs-mft11m -Name: mops-mg-rs-mft11m -Namespace: demo -Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-rs - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-08T14:15:52Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: - f:app.kubernetes.io/component: - f:app.kubernetes.io/instance: - f:app.kubernetes.io/managed-by: - f:app.kubernetes.io/name: - f:ownerReferences: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:type: - f:volumeExpansion: - .: - f:replicaSet: - Manager: kubedb-autoscaler - Operation: Update - Time: 2021-03-08T14:15:52Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-08T14:15:52Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as-rs - UID: a0dab64d-e7c4-4819-8ffe-360c70231577 - Resource Version: 153496 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-mg-rs-mft11m - UID: 84567b84-6de4-4658-b0d2-2c374e03e63d -Spec: - Database Ref: - Name: mg-rs - Type: VolumeExpansion - Volume Expansion: - Replica Set: 1594884096 -Status: - Conditions: - Last Transition Time: 2021-03-08T14:15:52Z - Message: RabbitMQ ops request is expanding volume of database - Observed Generation: 1 - Reason: VolumeExpansion - Status: True - Type: VolumeExpansion - Last Transition Time: 2021-03-08T14:17:02Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: ReplicasetVolumeExpansion - Status: True - Type: ReplicasetVolumeExpansion - Last Transition Time: 2021-03-08T14:17:07Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: - Status: True - Type: - Last Transition Time: 2021-03-08T14:17:12Z - Message: StatefulSet is recreated - Observed Generation: 1 - Reason: ReadyStatefulSets - Status: True - Type: ReadyStatefulSets - Last Transition Time: 2021-03-08T14:17:12Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 2m36s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-rs - Normal PauseDatabase 2m36s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-rs - Normal ReplicasetVolumeExpansion 86s KubeDB Ops-manager operator Successfully Expanded Volume - Normal 81s KubeDB Ops-manager operator Successfully Expanded Volume - Normal ResumeDatabase 81s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-rs - Normal ResumeDatabase 81s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-rs - Normal ReadyStatefulSets 76s KubeDB Ops-manager operator StatefulSet is recreated - Normal Successful 76s KubeDB Ops-manager operator Successfully Expanded Volume -``` - -Now, we are going to verify from the `Statefulset`, and the `Persistent Volume` whether the volume of the replicaset database has expanded to meet the desired state, Let's check, - -```bash -$ kubectl get sts -n demo mg-rs -o json | jq 
'.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1594884096" -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-b16daa50-83fc-4d25-b553-4a25f13166d5 2Gi RWO Delete Bound demo/datadir-mg-rs-0 topolvm-provisioner 11m -pvc-d4616bef-359d-4b73-ab9f-38c24aaaec8c 2Gi RWO Delete Bound demo/datadir-mg-rs-1 topolvm-provisioner 10m -pvc-ead21204-3dc7-453c-8121-d2fe48b1c3e2 2Gi RWO Delete Bound demo/datadir-mg-rs-2 topolvm-provisioner 9m52s -``` - -The above output verifies that we have successfully autoscaled the volume of the RabbitMQ replicaset database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-rs -kubectl delete RabbitMQautoscaler -n demo mg-as-rs -``` diff --git a/docs/guides/rabbitmq/autoscaler/storage/sharding.md b/docs/guides/rabbitmq/autoscaler/storage/sharding.md deleted file mode 100644 index 76e8aec34c..0000000000 --- a/docs/guides/rabbitmq/autoscaler/storage/sharding.md +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: RabbitMQ Shard Autoscaling -menu: - docs_{{ .version }}: - identifier: mg-storage-auto-scaling-shard - name: Sharding - parent: mg-storage-auto-scaling - weight: 25 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Storage Autoscaling of a RabbitMQ Sharded Database - -This guide will show you how to use `KubeDB` to autoscale the storage of a RabbitMQ Sharded database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). - -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) - -- You must have a `StorageClass` that supports volume expansion. - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Storage Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/storage/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Storage Autoscaling of Sharded Database - -At first verify that your cluster has a storage class, that supports volume expansion. Let's check, - -```bash -$ kubectl get storageclass -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 9h -topolvm-provisioner topolvm.cybozu.com Delete WaitForFirstConsumer true 9h -``` - -We can see from the output the `topolvm-provisioner` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. 
You can install topolvm from [here](https://github.com/topolvm/topolvm) - -Now, we are going to deploy a `RabbitMQ` sharded database using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ Sharded Database - -In this section, we are going to deploy a RabbitMQ sharded database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sh - namespace: demo -spec: - version: "4.4.26" - storageType: Durable - shardTopology: - configServer: - storage: - storageClassName: topolvm-provisioner - resources: - requests: - storage: 1Gi - replicas: 3 - mongos: - replicas: 2 - shard: - storage: - storageClassName: topolvm-provisioner - resources: - requests: - storage: 1Gi - replicas: 3 - shards: 2 - terminationPolicy: WipeOut -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-sh.yaml -RabbitMQ.kubedb.com/mg-sh created -``` - -Now, wait until `mg-sh` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sh 4.4.26 Ready 3m51s -``` - -Let's check volume size from one of the shard statefulset, and from the persistent volume, - -```bash -$ kubectl get sts -n demo mg-sh-shard0 -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-031836c6-95ae-4015-938c-da183c205828 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-0 topolvm-provisioner 5m1s -pvc-2515233f-0f7d-4d0d-8b45-97a3cb9d4488 1Gi RWO Delete Bound demo/datadir-mg-sh-shard0-2 topolvm-provisioner 3m44s -pvc-35f73708-3c11-4ead-a60b-e1679a294b81 1Gi RWO Delete Bound demo/datadir-mg-sh-shard0-0 topolvm-provisioner 5m -pvc-4b329feb-8c92-4605-a37e-c02b3499e311 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-2 topolvm-provisioner 3m55s -pvc-52490270-1355-4045-b2a1-872a671ab006 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-1 topolvm-provisioner 4m28s -pvc-80dc91d3-f56f-4037-b6e1-f69e13fb434c 1Gi RWO Delete Bound demo/datadir-mg-sh-shard1-1 topolvm-provisioner 4m26s -pvc-c1965a32-7471-4885-ac52-f9eab056d48e 1Gi RWO Delete Bound demo/datadir-mg-sh-shard1-2 topolvm-provisioner 3m57s -pvc-c838a27d-c75d-4caa-9c1d-456af3bfaba0 1Gi RWO Delete Bound demo/datadir-mg-sh-shard1-0 topolvm-provisioner 4m59s -pvc-d47f19be-f206-41c5-a0b1-5022776fea2f 1Gi RWO Delete Bound demo/datadir-mg-sh-shard0-1 topolvm-provisioner 4m25s -``` - -You can see the statefulset has 1GB storage, and the capacity of all the persistent volume is also 1GB. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up storage autoscaling for this database. - -### Storage Autoscaling - -Here, we are going to set up storage autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up vertical autoscaling for this sharded database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. 
Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as-sh - namespace: demo -spec: - databaseRef: - name: mg-sh - storage: - shard: - expansionMode: "Online" - trigger: "On" - usageThreshold: 60 - scalingThreshold: 50 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mg-sh` database. -- `spec.storage.shard.trigger` specifies that storage autoscaling is enabled for this database. -- `spec.storage.shard.usageThreshold` specifies storage usage threshold, if storage usage exceeds `60%` then storage autoscaling will be triggered. -- `spec.storage.shard.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `50%` of the current amount. -- It has another field `spec.storage.replicaSet.expansionMode` to set the opsRequest volumeExpansionMode, which support two values: `Online` & `Offline`. Default value is `Online`. - -> Note: In this demo we are only setting up the storage autoscaling for the shard pods, that's why we only specified the shard section of the autoscaler. You can enable autoscaling for configServer pods in the same yaml, by specifying the `spec.configServer` section, similar to the `spec.shard` section we have configured in this demo. - - -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-as-sh.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as-sh created -``` - -#### Storage Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as-sh 20s - -$ kubectl describe RabbitMQautoscaler mg-as-sh -n demo -Name: mg-as-sh -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2021-03-08T14:26:06Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:storage: - .: - f:shard: - .: - f:scalingThreshold: - f:trigger: - f:usageThreshold: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-08T14:26:06Z - Resource Version: 156292 - Self Link: /apis/autoscaling.kubedb.com/v1alpha1/namespaces/demo/RabbitMQautoscalers/mg-as-sh - UID: 203e332f-bdfe-470f-a429-a7b60c7be2ee -Spec: - Database Ref: - Name: mg-sh - Storage: - Shard: - Scaling Threshold: 50 - Trigger: On - Usage Threshold: 60 -Events: -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -Now, for this demo, we are going to manually fill up one of the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. 
- -Let's exec into the database pod and fill the database volume using the following commands: - -```bash -$ kubectl exec -it -n demo mg-sh-shard0-0 -- bash -root@mg-sh-shard0-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/ad11042f-f4cc-4dfc-9680-2afbbb199d48 1014M 335M 680M 34% /data/db -root@mg-sh-shard0-0:/# dd if=/dev/zero of=/data/db/file.img bs=500M count=1 -1+0 records in -1+0 records out -524288000 bytes (524 MB, 500 MiB) copied, 0.595358 s, 881 MB/s -root@mg-sh-shard0-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/ad11042f-f4cc-4dfc-9680-2afbbb199d48 1014M 837M 178M 83% /data/db -``` - -So, from the above output we can see that the storage usage is 83%, which exceeded the `usageThreshold` 60%. - -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-sh-ba5ikn VolumeExpansion Progressing 41s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-sh-ba5ikn VolumeExpansion Successful 2m54s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-mg-sh-ba5ikn -Name: mops-mg-sh-ba5ikn -Namespace: demo -Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-sh - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-08T14:31:52Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: - f:app.kubernetes.io/component: - f:app.kubernetes.io/instance: - f:app.kubernetes.io/managed-by: - f:app.kubernetes.io/name: - f:ownerReferences: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:type: - f:volumeExpansion: - .: - f:shard: - Manager: kubedb-autoscaler - Operation: Update - Time: 2021-03-08T14:31:52Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-08T14:31:52Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as-sh - UID: 203e332f-bdfe-470f-a429-a7b60c7be2ee - Resource Version: 158488 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-mg-sh-ba5ikn - UID: c56236c2-5b64-4775-ba5a-35727b96a414 -Spec: - Database Ref: - Name: mg-sh - Type: VolumeExpansion - Volume Expansion: - Shard: 1594884096 -Status: - Conditions: - Last Transition Time: 2021-03-08T14:31:52Z - Message: RabbitMQ ops request is expanding volume of database - Observed Generation: 1 - Reason: VolumeExpansion - Status: True - Type: VolumeExpansion - Last Transition Time: 2021-03-08T14:34:32Z - Message: Successfully Expanded Volume - 
Observed Generation: 1 - Reason: ShardVolumeExpansion - Status: True - Type: ShardVolumeExpansion - Last Transition Time: 2021-03-08T14:34:37Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: - Status: True - Type: - Last Transition Time: 2021-03-08T14:34:42Z - Message: StatefulSet is recreated - Observed Generation: 1 - Reason: ReadyStatefulSets - Status: True - Type: ReadyStatefulSets - Last Transition Time: 2021-03-08T14:34:42Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 3m21s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-sh - Normal PauseDatabase 3m21s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-sh - Normal ShardVolumeExpansion 41s KubeDB Ops-manager operator Successfully Expanded Volume - Normal 36s KubeDB Ops-manager operator Successfully Expanded Volume - Normal ResumeDatabase 36s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-sh - Normal ResumeDatabase 36s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-sh - Normal ReadyStatefulSets 31s KubeDB Ops-manager operator StatefulSet is recreated - Normal Successful 31s KubeDB Ops-manager operator Successfully Expanded Volume -``` - -Now, we are going to verify from the `Statefulset`, and the `Persistent Volume` whether the volume of the shard nodes of the database has expanded to meet the desired state, Let's check, - -```bash -$ kubectl get sts -n demo mg-sh-shard0 -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1594884096" -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-031836c6-95ae-4015-938c-da183c205828 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-0 topolvm-provisioner 13m -pvc-2515233f-0f7d-4d0d-8b45-97a3cb9d4488 2Gi RWO Delete Bound demo/datadir-mg-sh-shard0-2 topolvm-provisioner 11m -pvc-35f73708-3c11-4ead-a60b-e1679a294b81 2Gi RWO Delete Bound demo/datadir-mg-sh-shard0-0 topolvm-provisioner 13m -pvc-4b329feb-8c92-4605-a37e-c02b3499e311 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-2 topolvm-provisioner 11m -pvc-52490270-1355-4045-b2a1-872a671ab006 1Gi RWO Delete Bound demo/datadir-mg-sh-configsvr-1 topolvm-provisioner 12m -pvc-80dc91d3-f56f-4037-b6e1-f69e13fb434c 2Gi RWO Delete Bound demo/datadir-mg-sh-shard1-1 topolvm-provisioner 12m -pvc-c1965a32-7471-4885-ac52-f9eab056d48e 2Gi RWO Delete Bound demo/datadir-mg-sh-shard1-2 topolvm-provisioner 11m -pvc-c838a27d-c75d-4caa-9c1d-456af3bfaba0 2Gi RWO Delete Bound demo/datadir-mg-sh-shard1-0 topolvm-provisioner 12m -pvc-d47f19be-f206-41c5-a0b1-5022776fea2f 2Gi RWO Delete Bound demo/datadir-mg-sh-shard0-1 topolvm-provisioner 12m -``` - -The above output verifies that we have successfully autoscaled the volume of the shard nodes of this RabbitMQ database. 
- -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-sh -kubectl delete RabbitMQautoscaler -n demo mg-as-sh -``` diff --git a/docs/guides/rabbitmq/autoscaler/storage/standalone.md b/docs/guides/rabbitmq/autoscaler/storage/standalone.md deleted file mode 100644 index fd375b288d..0000000000 --- a/docs/guides/rabbitmq/autoscaler/storage/standalone.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: RabbitMQ Standalone Autoscaling -menu: - docs_{{ .version }}: - identifier: mg-storage-auto-scaling-standalone - name: Standalone - parent: mg-storage-auto-scaling - weight: 15 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Storage Autoscaling of a RabbitMQ Standalone Database - -This guide will show you how to use `KubeDB` to autoscale the storage of a RabbitMQ standalone database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). - -- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) - -- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) - -- You must have a `StorageClass` that supports volume expansion. - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQAutoscaler](/docs/guides/RabbitMQ/concepts/autoscaler.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Storage Autoscaling Overview](/docs/guides/RabbitMQ/autoscaler/storage/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Storage Autoscaling of Standalone Database - -At first verify that your cluster has a storage class, that supports volume expansion. Let's check, - -```bash -$ kubectl get storageclass -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 9h -topolvm-provisioner topolvm.cybozu.com Delete WaitForFirstConsumer true 9h -``` - -We can see from the output the `topolvm-provisioner` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. You can install topolvm from [here](https://github.com/topolvm/topolvm) - -Now, we are going to deploy a `RabbitMQ` standalone using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQAutoscaler` to set up autoscaling. - -#### Deploy RabbitMQ standalone - -In this section, we are going to deploy a RabbitMQ standalone database with version `4.4.26`. Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-standalone - namespace: demo -spec: - version: "4.4.26" - storageType: Durable - storage: - storageClassName: topolvm-provisioner - resources: - requests: - storage: 1Gi - terminationPolicy: WipeOut -``` - -Let's create the `RabbitMQ` CRO we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-standalone.yaml -RabbitMQ.kubedb.com/mg-standalone created -``` - -Now, wait until `mg-standalone` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-standalone 4.4.26 Ready 2m53s -``` - -Let's check volume size from statefulset, and from the persistent volume, - -```bash -$ kubectl get sts -n demo mg-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-cf469ed8-a89a-49ca-bf7c-8c76b7889428 1Gi RWO Delete Bound demo/datadir-mg-standalone-0 topolvm-provisioner 7m41s -``` - -You can see the statefulset has 1GB storage, and the capacity of the persistent volume is also 1GB. - -We are now ready to apply the `RabbitMQAutoscaler` CRO to set up storage autoscaling for this database. - -### Storage Autoscaling - -Here, we are going to set up storage autoscaling using a RabbitMQAutoscaler Object. - -#### Create RabbitMQAutoscaler Object - -In order to set up vertical autoscaling for this standalone database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, - -```yaml -apiVersion: autoscaling.kubedb.com/v1alpha1 -kind: RabbitMQAutoscaler -metadata: - name: mg-as - namespace: demo -spec: - databaseRef: - name: mg-standalone - storage: - standalone: - expansionMode: "Online" - trigger: "On" - usageThreshold: 60 - scalingThreshold: 50 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mg-standalone` database. -- `spec.storage.standalone.trigger` specifies that storage autoscaling is enabled for this database. -- `spec.storage.standalone.usageThreshold` specifies storage usage threshold, if storage usage exceeds `60%` then storage autoscaling will be triggered. -- `spec.storage.standalone.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `50%` of the current amount. -- It has another field `spec.storage.replicaSet.expansionMode` to set the opsRequest volumeExpansionMode, which support two values: `Online` & `Offline`. Default value is `Online`. 
- -Let's create the `RabbitMQAutoscaler` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/autoscaling/storage/mg-as-standalone.yaml -RabbitMQautoscaler.autoscaling.kubedb.com/mg-as created -``` - -#### Storage Autoscaling is set up successfully - -Let's check that the `RabbitMQautoscaler` resource is created successfully, - -```bash -$ kubectl get RabbitMQautoscaler -n demo -NAME AGE -mg-as 102s - -$ kubectl describe RabbitMQautoscaler mg-as -n demo -Name: mg-as -Namespace: demo -Labels: -Annotations: -API Version: autoscaling.kubedb.com/v1alpha1 -Kind: RabbitMQAutoscaler -Metadata: - Creation Timestamp: 2021-03-08T12:58:01Z - Generation: 1 - Managed Fields: - API Version: autoscaling.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:storage: - .: - f:standalone: - .: - f:scalingThreshold: - f:trigger: - f:usageThreshold: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-08T12:58:01Z - Resource Version: 134423 - Self Link: /apis/autoscaling.kubedb.com/v1alpha1/namespaces/demo/RabbitMQautoscalers/mg-as - UID: 999a2dc9-7eb7-4ed2-9e90-d3f8b21c091a -Spec: - Database Ref: - Name: mg-standalone - Storage: - Standalone: - Scaling Threshold: 50 - Trigger: On - Usage Threshold: 60 -Events: -``` -So, the `RabbitMQautoscaler` resource is created successfully. - -Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. - -Let's exec into the database pod and fill the database volume using the following commands: - -```bash -$ kubectl exec -it -n demo mg-standalone-0 -- bash -root@mg-standalone-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/1df4ee9e-b900-4c0f-9d2c-8493fb30bdc0 1014M 334M 681M 33% /data/db -root@mg-standalone-0:/# dd if=/dev/zero of=/data/db/file.img bs=500M count=1 -1+0 records in -1+0 records out -524288000 bytes (524 MB, 500 MiB) copied, 0.359202 s, 1.5 GB/s -root@mg-standalone-0:/# df -h /data/db -Filesystem Size Used Avail Use% Mounted on -/dev/topolvm/1df4ee9e-b900-4c0f-9d2c-8493fb30bdc0 1014M 835M 180M 83% /data/db -``` - -So, from the above output we can see that the storage usage is 84%, which exceeded the `usageThreshold` 60%. - -Let's watch the `RabbitMQopsrequest` in the demo namespace to see if any `RabbitMQopsrequest` object is created. After some time you'll see that a `RabbitMQopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-standalone-p27c11 VolumeExpansion Progressing 26s -``` - -Let's wait for the ops request to become successful. - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-mg-standalone-p27c11 VolumeExpansion Successful 73s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-mg-standalone-p27c11 -Name: mops-mg-standalone-p27c11 -Namespace: demo -Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-standalone - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-08T13:19:51Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: - f:app.kubernetes.io/component: - f:app.kubernetes.io/instance: - f:app.kubernetes.io/managed-by: - f:app.kubernetes.io/name: - f:ownerReferences: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:type: - f:volumeExpansion: - .: - f:standalone: - Manager: kubedb-autoscaler - Operation: Update - Time: 2021-03-08T13:19:51Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-08T13:19:52Z - Owner References: - API Version: autoscaling.kubedb.com/v1alpha1 - Block Owner Deletion: true - Controller: true - Kind: RabbitMQAutoscaler - Name: mg-as - UID: 999a2dc9-7eb7-4ed2-9e90-d3f8b21c091a - Resource Version: 139871 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-mg-standalone-p27c11 - UID: 9606485d-9dd8-4787-9c7c-61fc874c555e -Spec: - Database Ref: - Name: mg-standalone - Type: VolumeExpansion - Volume Expansion: - Standalone: 1594884096 -Status: - Conditions: - Last Transition Time: 2021-03-08T13:19:52Z - Message: RabbitMQ ops request is expanding volume of database - Observed Generation: 1 - Reason: VolumeExpansion - Status: True - Type: VolumeExpansion - Last Transition Time: 2021-03-08T13:20:47Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: StandaloneVolumeExpansion - Status: True - Type: StandaloneVolumeExpansion - Last Transition Time: 2021-03-08T13:20:52Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: - Status: True - Type: - Last Transition Time: 2021-03-08T13:20:57Z - Message: StatefulSet is recreated - Observed Generation: 1 - Reason: ReadyStatefulSets - Status: True - Type: ReadyStatefulSets - Last Transition Time: 2021-03-08T13:20:57Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 110s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-standalone - Normal PauseDatabase 110s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-standalone - Normal StandaloneVolumeExpansion 55s KubeDB Ops-manager operator Successfully Expanded Volume - Normal 50s KubeDB Ops-manager operator Successfully Expanded Volume - Normal ResumeDatabase 50s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-standalone - Normal ResumeDatabase 50s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-standalone - Normal ReadyStatefulSets 45s KubeDB Ops-manager operator StatefulSet is recreated - Normal Successful 45s KubeDB Ops-manager operator Successfully Expanded Volume -``` - -Now, we are going to verify from the `Statefulset`, and the `Persistent Volume` whether the volume of the standalone database has expanded to meet the desired state, Let's check, - 
-```bash
-$ kubectl get sts -n demo mg-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage'
-"1594884096"
-$ kubectl get pv -n demo
-NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                          STORAGECLASS          REASON   AGE
-pvc-cf469ed8-a89a-49ca-bf7c-8c76b7889428   2Gi        RWO            Delete           Bound    demo/datadir-mg-standalone-0   topolvm-provisioner            26m
-```
-
-The above output verifies that we have successfully autoscaled the volume of the RabbitMQ standalone database.
-
-## Cleaning Up
-
-To clean up the Kubernetes resources created by this tutorial, run:
-
-```bash
-kubectl delete mg -n demo mg-standalone
-kubectl delete RabbitMQautoscaler -n demo mg-as
-```
diff --git a/docs/guides/rabbitmq/autoscaler/storage/storage-autoscale.md b/docs/guides/rabbitmq/autoscaler/storage/storage-autoscale.md
new file mode 100644
index 0000000000..5dba557d54
--- /dev/null
+++ b/docs/guides/rabbitmq/autoscaler/storage/storage-autoscale.md
@@ -0,0 +1,333 @@
+---
+title: RabbitMQ Storage Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: rm-autoscaling-storage-description
+    name: storage-autoscaling
+    parent: rm-autoscaling
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Storage Autoscaling of a RabbitMQ Cluster
+
+This guide will show you how to use `KubeDB` to autoscale the storage of a RabbitMQ cluster.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.
+
+- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation)
+
+- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
+
+- You must have a `StorageClass` that supports volume expansion.
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md)
+  - [RabbitMQAutoscaler](/docs/guides/rabbitmq/concepts/autoscaler.md)
+  - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md)
+  - [Storage Autoscaling Overview](/docs/guides/rabbitmq/autoscaler/storage/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+## Storage Autoscaling of Cluster Database
+
+First, verify that your cluster has a storage class that supports volume expansion. Let's check,
+
+```bash
+$ kubectl get storageclass
+NAME                  PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+standard (default)    rancher.io/local-path   Delete          WaitForFirstConsumer   false                  79m
+topolvm-provisioner   topolvm.cybozu.com      Delete          WaitForFirstConsumer   true                   78m
+```
+
+We can see from the output that the `topolvm-provisioner` storage class has the `ALLOWVOLUMEEXPANSION` field set to true, so this storage class supports volume expansion and we can use it. You can install TopoLVM from [here](https://github.com/topolvm/topolvm).
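+
+If you do not have an expandable storage class handy, most classes that set `allowVolumeExpansion: true` will work. The following is only a rough sketch of such a class; the class name is hypothetical, and the provisioner is simply the one shown in the output above, so keep whatever provisioner your cluster actually runs:
+
+```yaml
+# Sketch of an expandable StorageClass (hypothetical name, example provisioner).
+# The field that matters for this guide is allowVolumeExpansion: true.
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: expandable-sc
+provisioner: topolvm.cybozu.com
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+allowVolumeExpansion: true
+```
+
+Any PersistentVolumeClaim bound to such a class can be grown in place, which is what the autoscaler relies on.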
Then, in the next section we will set up autoscaling for this database using `RabbitMQAutoscaler` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, + +> If you want to autoscale RabbitMQ `Standalone`, Just remove the `spec.Replicas` from the below yaml and rest of the steps are same. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-autoscale + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut + podTemplate: + spec: + containers: + - name: rabbitmq + resources: + requests: + cpu: "0.5m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + serviceTemplates: + - alias: primary + spec: + type: LoadBalancer +``` + +Let's create the `RabbitMQ` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/rabbitmq/autoscaler/storage/cluster/examples/sample-rabbitmq.yaml +rabbitmq.kubedb.com/sample-rabbitmq created +``` + +Now, wait until `sample-rabbitmq` has status `Ready`. i.e, + +```bash +$ kubectl get rabbitmq -n demo +NAME VERSION STATUS AGE +rabbitmq-autoscale 3.13.2 Ready 3m46s +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get sts -n demo rabbitmq-autoscale -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-43266d76-f280-4cca-bd78-d13660a84db9 1Gi RWO Delete Bound demo/data-sample-rabbitmq-2 topolvm-provisioner 57s +pvc-4a509b05-774b-42d9-b36d-599c9056af37 1Gi RWO Delete Bound demo/data-sample-rabbitmq-0 topolvm-provisioner 58s +pvc-c27eee12-cd86-4410-b39e-b1dd735fc14d 1Gi RWO Delete Bound demo/data-sample-rabbitmq-1 topolvm-provisioner 57s +``` + +You can see the petset has 1GB storage, and the capacity of all the persistent volume is also 1GB. + +We are now ready to apply the `RabbitMQAutoscaler` CRO to set up storage autoscaling for this database. + +### Storage Autoscaling + +Here, we are going to set up storage autoscaling using a RabbitMQAutoscaler Object. + +#### Create RabbitMQAutoscaler Object + +In order to set up vertical autoscaling for this replicaset database, we have to create a `RabbitMQAutoscaler` CRO with our desired configuration. Below is the YAML of the `RabbitMQAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: RabbitMQAutoscaler +metadata: + name: rabbitmq-storage-autosclaer + namespace: demo +spec: + databaseRef: + name: rabbitmq-autoscale + storage: + rabbitmq: + expansionMode: "Offline" + trigger: "On" + usageThreshold: 20 + scalingThreshold: 30 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `sample-rabbitmq` database. +- `spec.storage.rabbitmq.trigger` specifies that storage autoscaling is enabled for this database. +- `spec.storage.rabbitmq.usageThreshold` specifies storage usage threshold, if storage usage exceeds `20%` then storage autoscaling will be triggered. +- `spec.storage.rabbitmq.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `20%` of the current amount. +- `spec.storage.rabbitmq.expansionMode` specifies the expansion mode of volume expansion `rabbitmqOpsRequest` created by `rabbitmqAutoscaler`. 
topolvm-provisioner supports online volume expansion so here `expansionMode` is set as "Online". + +Let's create the `rabbitmqAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/rabbitmq/autoscaler/storage/cluster/examples/mdas-storage.yaml +rabbitmqautoscaler.autoscaling.kubedb.com/md-as-st created +``` + +#### Storage Autoscaling is set up successfully + +Let's check that the `rabbitmqautoscaler` resource is created successfully, + +```bash +$ kubectl get rabbitmqautoscaler -n demo +NAME AGE +md-as-st 33s + +$ kubectl describe rabbitmqautoscaler rabbitmq-storage-autosclaer -n demo +Name: md-as-st +Namespace: demo +Labels: +Annotations: API Version: autoscaling.kubedb.com/v1alpha1 +Kind: rabbitmqAutoscaler +Metadata: + Creation Timestamp: 2022-01-14T06:08:02Z + Generation: 1 + Managed Fields: + ... + Resource Version: 24009 + UID: 4f45a3b3-fc72-4d04-b52c-a770944311f6 +Spec: + Database Ref: + Name: sample-rabbitmq + Storage: + rabbitmq: + Scaling Threshold: 20 + Trigger: On + Usage Threshold: 20 +Events: +``` + +So, the `rabbitmqautoscaler` resource is created successfully. + +Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. + +Let's exec into the database pod and fill the database volume(`var/lib/mysql`) using the following commands: + +```bash +$ kubectl exec -it -n demo sample-rabbitmq-0 -- bash +root@sample-rabbitmq-0:/ df -h /var/lib/mysql +Filesystem Size Used Avail Use% Mounted on +/dev/topolvm/57cd4330-784f-42c1-bf8e-e743241df164 1014M 357M 658M 36% /var/lib/mysql +root@sample-rabbitmq-0:/ dd if=/dev/zero of=/var/lib/mysql/file.img bs=500M count=1 +1+0 records in +1+0 records out +524288000 bytes (524 MB, 500 MiB) copied, 0.340877 s, 1.5 GB/s +root@sample-rabbitmq-0:/ df -h /var/lib/mysql +Filesystem Size Used Avail Use% Mounted on +/dev/topolvm/57cd4330-784f-42c1-bf8e-e743241df164 1014M 857M 158M 85% /var/lib/mysql +``` + +So, from the above output we can see that the storage usage is 83%, which exceeded the `usageThreshold` 20%. + +Let's watch the `rabbitmqopsrequest` in the demo namespace to see if any `rabbitmqopsrequest` object is created. After some time you'll see that a `rabbitmqopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. + +```bash +$ kubectl get rabbitmqopsrequest -n demo +NAME TYPE STATUS AGE +mops-sample-rabbitmq-xojkua VolumeExpansion Progressing 15s +``` + +Let's wait for the ops request to become successful. + +```bash +$ kubectl get rabbitmqopsrequest -n demo +NAME TYPE STATUS AGE +mops-sample-rabbitmq-xojkua VolumeExpansion Successful 97s +``` + +We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. + +```bash +$ kubectl describe rabbitmqopsrequest -n demo mops-sample-rabbitmq-xojkua +Name: mops-sample-rabbitmq-xojkua +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=sample-rabbitmq + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=rabbitmqs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: rabbitmqOpsRequest +Metadata: + Creation Timestamp: 2022-01-14T06:13:10Z + Generation: 1 + Managed Fields: ... 
+ Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: rabbitmqAutoscaler + Name: md-as-st + UID: 4f45a3b3-fc72-4d04-b52c-a770944311f6 + Resource Version: 25557 + UID: 90763a49-a03f-407c-a233-fb20c4ab57d7 +Spec: + Database Ref: + Name: sample-rabbitmq + Type: VolumeExpansion + Volume Expansion: + rabbitmq: 1594884096 +Status: + Conditions: + Last Transition Time: 2022-01-14T06:13:10Z + Message: Controller has started to Progress the rabbitmqOpsRequest: demo/mops-sample-rabbitmq-xojkua + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-01-14T06:14:25Z + Message: Volume Expansion performed successfully in rabbitmq pod for rabbitmqOpsRequest: demo/mops-sample-rabbitmq-xojkua + Observed Generation: 1 + Reason: SuccessfullyVolumeExpanded + Status: True + Type: VolumeExpansion + Last Transition Time: 2022-01-14T06:14:25Z + Message: Controller has successfully expand the volume of rabbitmq demo/mops-sample-rabbitmq-xojkua + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m58s KubeDB Enterprise Operator Start processing for rabbitmqOpsRequest: demo/mops-sample-rabbitmq-xojkua + Normal Starting 2m58s KubeDB Enterprise Operator Pausing rabbitmq databse: demo/sample-rabbitmq + Normal Successful 2m58s KubeDB Enterprise Operator Successfully paused rabbitmq database: demo/sample-rabbitmq for rabbitmqOpsRequest: mops-sample-rabbitmq-xojkua + Normal Successful 103s KubeDB Enterprise Operator Volume Expansion performed successfully in rabbitmq pod for rabbitmqOpsRequest: demo/mops-sample-rabbitmq-xojkua + Normal Starting 103s KubeDB Enterprise Operator Updating rabbitmq storage + Normal Successful 103s KubeDB Enterprise Operator Successfully Updated rabbitmq storage + Normal Starting 103s KubeDB Enterprise Operator Resuming rabbitmq database: demo/sample-rabbitmq + Normal Successful 103s KubeDB Enterprise Operator Successfully resumed rabbitmq database: demo/sample-rabbitmq + Normal Successful 103s KubeDB Enterprise Operator Controller has Successfully expand the volume of rabbitmq: demo/sample-rabbitmq +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volume` whether the volume of the replicaset database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get sts -n demo sample-rabbitmq -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1594884096" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-43266d76-f280-4cca-bd78-d13660a84db9 2Gi RWO Delete Bound demo/data-sample-rabbitmq-2 topolvm-provisioner 23m +pvc-4a509b05-774b-42d9-b36d-599c9056af37 2Gi RWO Delete Bound demo/data-sample-rabbitmq-0 topolvm-provisioner 24m +pvc-c27eee12-cd86-4410-b39e-b1dd735fc14d 2Gi RWO Delete Bound demo/data-sample-rabbitmq-1 topolvm-provisioner 23m +``` + +The above output verifies that we have successfully autoscaled the volume of the rabbitmq replicaset database. 
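You can also read the sizes straight from the PersistentVolumeClaims. The command below is a quick sketch; the `app.kubernetes.io/instance` label selector is an assumption based on KubeDB's usual labelling, so adjust it if your claims are labelled differently.

```bash
# Print each PVC of the cluster along with its requested storage size
$ kubectl get pvc -n demo -l app.kubernetes.io/instance=sample-rabbitmq \
    -o custom-columns=NAME:.metadata.name,REQUESTED:.spec.resources.requests.storage
```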
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete rabbitmq -n demo sample-rabbitmq +kubectl delete rabbitmqautoscaler -n demo md-as-st +kubectl delete ns demo +``` diff --git a/docs/guides/rabbitmq/concepts/appbinding.md b/docs/guides/rabbitmq/concepts/appbinding.md index 789d2aee38..bcde6c41fa 100644 --- a/docs/guides/rabbitmq/concepts/appbinding.md +++ b/docs/guides/rabbitmq/concepts/appbinding.md @@ -18,8 +18,6 @@ section_menu_id: guides An `AppBinding` is a Kubernetes `CustomResourceDefinition`(CRD) which points to an application using either its URL (usually for a non-Kubernetes resident service instance) or a Kubernetes service object (if self-hosted in a Kubernetes cluster), some optional parameters and a credential secret. To learn more about AppBinding and the problems it solves, please go through this blog post: [The case for AppBinding](https://appscode.com/blog/post/the-case-for-appbinding). -If you deploy a database using [KubeDB](https://kubedb.com/docs/latest/concepts/), `AppBinding` object will be created automatically for it. Otherwise, you have to create an `AppBinding` object manually pointing to your desired database. - ## AppBinding CRD Specification Like any official Kubernetes resource, an `AppBinding` has `TypeMeta`, `ObjectMeta` and `Spec` sections. However, unlike other Kubernetes resources, it does not have a `Status` section. @@ -102,28 +100,6 @@ RabbitMQ : | `username` | Username of the target database. | | `password` | Password for the user specified by `username`. | -PostgreSQL : - -| Key | Usage | -| ------------------- | --------------------------------------------------- | -| `POSTGRES_USER` | Username of the target database. | -| `POSTGRES_PASSWORD` | Password for the user specified by `POSTGRES_USER`. | - -MySQL : - -| Key | Usage | -| ---------- | ---------------------------------------------- | -| `username` | Username of the target database. | -| `password` | Password for the user specified by `username`. | - - -Elasticsearch: - -| Key | Usage | -| ---------------- | ----------------------- | -| `ADMIN_USERNAME` | Admin username | -| `ADMIN_PASSWORD` | Password for admin user | - #### spec.appRef appRef refers to the underlying application. It has 4 fields named `apiGroup`, `kind`, `name` & `namespace`. 
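For example, an `AppBinding` created for a KubeDB managed RabbitMQ named `rm-cluster` in the `demo` namespace would carry an `appRef` similar to the following sketch (the values here are illustrative, not taken from a live cluster):

```yaml
spec:
  appRef:
    apiGroup: kubedb.com
    kind: RabbitMQ
    name: rm-cluster
    namespace: demo
```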
diff --git a/docs/guides/rabbitmq/concepts/catalog.md b/docs/guides/rabbitmq/concepts/catalog.md index 3f48f46445..d03bc3ccfe 100644 --- a/docs/guides/rabbitmq/concepts/catalog.md +++ b/docs/guides/rabbitmq/concepts/catalog.md @@ -35,24 +35,24 @@ apiVersion: catalog.kubedb.com/v1alpha1 kind: RabbitMQVersion metadata: annotations: - meta.helm.sh/release-name: kubedb-catalog + meta.helm.sh/release-name: kubedb meta.helm.sh/release-namespace: kubedb - creationTimestamp: "2024-08-22T12:37:56Z" - generation: 2 + creationTimestamp: "2024-09-10T05:57:12Z" + generation: 1 labels: - app.kubernetes.io/instance: kubedb-catalog + app.kubernetes.io/instance: kubedb app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: kubedb-catalog app.kubernetes.io/version: v2024.8.21 helm.sh/chart: kubedb-catalog-v2024.8.21 name: 3.13.2 - resourceVersion: "262093" - uid: 8cc7b931-a22a-41eb-a9ba-9c3247436326 + resourceVersion: "46385" + uid: d853aaf9-e9b8-40b8-9663-a201a5a645c1 spec: db: image: ghcr.io/appscode-images/rabbitmq:3.13.2-management-alpine initContainer: - image: raihankhanraka/rabbitmq-init:3.13.2 + image: ghcr.io/kubedb/rabbitmq-init:3.13.2 securityContext: runAsUser: 999 version: 3.13.2 diff --git a/docs/guides/rabbitmq/concepts/opsrequest.md b/docs/guides/rabbitmq/concepts/opsrequest.md index bb5eeaa0d5..e6febcca5c 100644 --- a/docs/guides/rabbitmq/concepts/opsrequest.md +++ b/docs/guides/rabbitmq/concepts/opsrequest.md @@ -26,6 +26,8 @@ Here, some sample `RabbitMQOpsRequest` CRs for different administrative operatio **Sample `RabbitMQOpsRequest` for updating database:** +Let's assume that you have a KubeDB managed RabbitMQ cluster named `rm-quickstart` running on your kubernetes with version `3.12.12`. Now, You can update it's version to `3.13.2` using the following manifest. + ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest @@ -35,13 +37,15 @@ metadata: spec: type: UpdateVersion databaseRef: - name: rabbitmq + name: rm-quickstart updateVersion: targetVersion: 3.13.2 ``` **Sample `RabbitMQOpsRequest` Objects for Horizontal Scaling of the database Cluster:** +You can scale up and down your rabbitmq cluster horizontally. However, Scale down operation comes with a caution. By design, RabbitMQ classic queues and non-replicated quorum queues can not be moved from one node to another. So, Scaling down operation in RabbitMQ clusters with classic or non-replicated quorum queues will cause failure. Make sure such types of queues are moved to nodes with lower index number or, convert them to replicated quorum queues. + ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest @@ -51,13 +55,15 @@ metadata: spec: type: HorizontalScaling databaseRef: - name: rabbitmq + name: rm-quickstart horizontalScaling: node: 3 ``` **Sample `RabbitMQOpsRequest` Objects for Vertical Scaling of the database cluster:** +You can vertically scale up or down your cluster by updating the requested cpu, memory or, by limiting them. + ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest @@ -74,23 +80,28 @@ spec: requests: cpu: 600m memory: 1.2Gi + limits: + cpu: 1 + memory: 2Gi ``` **Sample `RabbitMQOpsRequest` Objects for Reconfiguring database cluster:** +Reconfigure your cluster by applying new configuration via `rabbitmq.conf` file directly, via custom configuration secret, or by removing custom config secret and resetting to default one. 
+ ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest metadata: - name: rabbitmq-reconfigure + name: reconfigure namespace: demo spec: type: Reconfigure databaseRef: - name: rabbitmq + name: rm-quickstart configuration: applyConfig: - rabbitmq.conf: |- + rabbitmq.conf: | default_vhost = /customvhost ``` @@ -103,7 +114,7 @@ metadata: spec: type: Reconfigure databaseRef: - name: rabbitmq + name: rm-quickstart configuration: removeCustomConfig: true ``` @@ -117,7 +128,7 @@ metadata: spec: type: Reconfigure databaseRef: - name: rabbitmq + name: rm-quickstart configuration: configSecret: name: new-custom-config @@ -125,6 +136,8 @@ spec: **Sample `RabbitMQOpsRequest` Objects for Volume Expansion of database cluster:** +You can expand RabbitMQ storage volume in both online and offline mode (detaching nodes prior to expanding storage). + ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest @@ -132,7 +145,7 @@ metadata: name: rm-online-volume-expansion namespace: rabbit spec: - ifReady: "Always" + apply: "IfReady" type: VolumeExpansion databaseRef: name: rabbitmq @@ -141,8 +154,6 @@ spec: node: 1.5Gi ``` -**Sample `RabbitMQOpsRequest` Objects for Reconfiguring TLS of the database:** - ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest @@ -159,6 +170,10 @@ spec: node: 1.5Gi ``` +**Sample `RabbitMQOpsRequest` Objects for Reconfiguring TLS of the database:** + +You can use this Ops-Request to Add, Update, Remove or Rotate Your certificates used in TLS connectivity. + ```yaml apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest diff --git a/docs/guides/rabbitmq/concepts/rabbitmq.md b/docs/guides/rabbitmq/concepts/rabbitmq.md index 4cdddf4212..8a847e1387 100644 --- a/docs/guides/rabbitmq/concepts/rabbitmq.md +++ b/docs/guides/rabbitmq/concepts/rabbitmq.md @@ -16,7 +16,7 @@ section_menu_id: guides ## KubeDB managed RabbitMQ -`RabbitMQ` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [RabbitMQ](https://www.RabbitMQ.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a RabbitMQ object, and the KubeDB operator will create Kubernetes objects in the desired state for you. +`RabbitMQ` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [RabbitMQ](https://www.rabbitmq.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a RabbitMQ object, and the KubeDB operator will create Kubernetes objects in the desired state for you. ## RabbitMQ Spec @@ -230,7 +230,7 @@ To learn more about how to use a custom configuration file see [here](/docs/guid ### spec.podTemplate -KubeDB allows providing a template for pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for Pgpool. +KubeDB allows providing a template for pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for RabbitMQ. 
KubeDB accept following fields to set in `spec.podTemplate:` @@ -238,8 +238,8 @@ KubeDB accept following fields to set in `spec.podTemplate:` - annotations (pod's annotation) - labels (pod's labels) - controller: - - annotations (statefulset's annotation) - - labels (statefulset's labels) + - annotations (PetSet's annotation) + - labels (PetSet's labels) - spec: - volumes - initContainers diff --git a/docs/guides/rabbitmq/configuration/_index.md b/docs/guides/rabbitmq/configuration/_index.md new file mode 100755 index 0000000000..04634c03dd --- /dev/null +++ b/docs/guides/rabbitmq/configuration/_index.md @@ -0,0 +1,10 @@ +--- +title: Run RabbitMQ with Custom Configuration +menu: + docs_{{ .version }}: + identifier: rm-configuration + name: Custom Configuration + parent: rm-guides + weight: 30 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/rabbitmq/configuration/using-config-file.md b/docs/guides/rabbitmq/configuration/using-config-file.md new file mode 100644 index 0000000000..76633cc79b --- /dev/null +++ b/docs/guides/rabbitmq/configuration/using-config-file.md @@ -0,0 +1,174 @@ +--- +title: Run RabbitMQ with Custom Configuration +menu: + docs_{{ .version }}: + identifier: rm-using-config-file-configuration + name: Customize Configurations + parent: rm-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for RabbitMQ. This tutorial will show you how to use KubeDB to run a RabbitMQ with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. Run the following command to prepare your cluster for this tutorial: + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/rabbitmq](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +RabbitMQ allows configuring via configuration file. The default configuration file for RabbitMQ deployed by `KubeDB` can be found in `/config/rabbitmq.conf`. When `spec.configSecret` is set to rabbitmq, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to rabbitmq for use it as the configuration file. + +> To learn available configuration option of Pgpool see [Configuration Options](https://www.rabbitmq.com/docs/configure). + +At first, you have to create a secret with your configuration file contents as the value of this key `rabbitmq.conf`. Then, you have to specify the name of this secret in `spec.configSecret.name` section while creating rabbitmq CRO. + +## Custom Configuration + +At first, create `rabbitmq.conf` file containing required configuration settings. 
+ +```bash +$ cat rabbitmq.conf +vm_memory_high_watermark.absolute = 4GB +heartbeat = 100 +collect_statistics = coarse +``` + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo rm-configuration --from-file=./rabbitmq.conf +secret/rm-configuration created +``` + +Verify the secret has the configuration file. + +```bash +$ kubectl get secret -n demo rm-configuration -o yaml +apiVersion: v1 +data: + rabbitmq.conf: bnVtX2luaXRfY2hpbGRyZW4gPSA2Cm1heF9wb29sID0gNjUKY2hpbGRfbGlmZV90aW1lID0gNDAwCg== +kind: Secret +metadata: + creationTimestamp: "2024-07-29T12:40:48Z" + name: rm-configuration + namespace: demo + resourceVersion: "32076" + uid: 80f5324a-9a65-4801-b136-21d2fa001b12 +type: Opaque + +$ echo bnVtX2luaXRfY2hpbGRyZW4gPSA2Cm1heF9wb29sID0gNjUKY2hpbGRfbGlmZV90aW1lID0gNDAwCg== | base64 -d +vm_memory_high_watermark.absolute = 4GB +heartbeat = 100 +collect_statistics = coarse +``` + +Now, create rabbitmq crd specifying `spec.configSecret` field. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rm-custom-config + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + configSecret: + name: rm-configuration + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/rabbitmq-config-file.yaml +rabbitmq.kubedb.com/rm-custom-config created +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `rm-custom-config-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo rm-custom-config-0 +NAME READY STATUS RESTARTS AGE +rm-custom-config-0 1/1 Running 0 35s +``` + +Now, we will check if the pgpool has started with the custom configuration we have provided. + +Now, you can exec into the pgpool pod and find if the custom configuration is there, + +```bash +$ kubectl exec -it -n demo rm-custom-config-0 -- bash +rm-custom-config-0:/$ cat /config/rabbitmq.conf +log.console.level= info +stomp.default_user= $(RABBITMQ_DEFAULT_USER) +mqtt.allow_anonymous= false +log.file.level= info +loopback_users= none +log.console= true +cluster_partition_handling= pause_minority +vm_memory_high_watermark.absolute= 4GB +mqtt.default_pass= $(RABBITMQ_DEFAULT_PASS) +cluster_formation.peer_discovery_backend= rabbit_peer_discovery_k8s +listeners.tcp.default= 5672 +default_user= $(RABBITMQ_DEFAULT_USER) +cluster_formation.node_cleanup.only_log_warning= true +cluster_formation.k8s.service_name= rm-custom-config-pods +heartbeat= 100 +cluster_name= rm-custom-config +collect_statistics= coarse +default_pass= $(RABBITMQ_DEFAULT_PASS) +cluster_formation.k8s.host= kubernetes.default.svc.cluster.local +mqtt.default_user= $(RABBITMQ_DEFAULT_USER) +stomp.default_pass= $(RABBITMQ_DEFAULT_PASS) +queue_master_locator= min-masters +cluster_formation.k8s.address_type= hostname +rm-custom-config-0:/$ exit +exit +``` + +As we can see from the configuration of running rabbitmq, the value of `collect_statistics`, `heartbeat` and `vm_memory_high_watermark.absolute` has been set to our desired value successfully. 
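Besides reading the rendered config file, you can also ask the running node for its effective settings with `rabbitmq-diagnostics`. The command below is a sketch; the grep pattern only trims the output to the keys we customized.

```bash
$ kubectl exec -n demo rm-custom-config-0 -- rabbitmq-diagnostics environment | grep -E 'heartbeat|collect_statistics|vm_memory_high_watermark'
```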
+ +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete -n demo rm/rm-custom-config +kubectl delete -n demo secret rm-configuration +kubectl delete rm -n demo rm-custom-config +kubectl delete ns demo +``` + +## Next Steps + +- Monitor your rabbitmq database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). +- Monitor your Pgpool database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Detail concepts of [RabbitMQVersion object](/docs/guides/rabbitmq/concepts/catalog.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/configuration/using-podtemplate.md b/docs/guides/rabbitmq/configuration/using-podtemplate.md new file mode 100644 index 0000000000..3ffb72753c --- /dev/null +++ b/docs/guides/rabbitmq/configuration/using-podtemplate.md @@ -0,0 +1,539 @@ +--- +title: Run RabbitMQ with Custom Configuration +menu: + docs_{{ .version }}: + identifier: using-podtemplate-configuration-rm + name: Customize PodTemplate + parent: rm-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Pgpool with Custom PodTemplate + +KubeDB supports providing custom configuration for Pgpool via [PodTemplate](/docs/guides/rabbitmq/concepts/rabbitmq.md#specpodtemplate). This tutorial will show you how to use KubeDB to run a RabbitMQ database with custom configuration using PodTemplate. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/pgpool](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for RabbitMQ database. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - volumes + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - affinity + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + - livenessProbe + - readinessProbe + - lifecycle + +Read about the fields in details in [PodTemplate concept](/docs/guides/rabbitmq/concepts/rabbitmq.md#specpodtemplate), + + +## CRD Configuration + +Below is the YAML for the Pgpool created in this example. 
Here, `spec.podTemplate.spec.containers[].env` specifies additional environment variables by users. + +In this tutorial, we will register additional two users at starting time of Pgpool. So, the fact is any environment variable with having `suffix: USERNAME` and `suffix: PASSWORD` will be key value pairs of username and password and will be registered in the `pool_passwd` file of Pgpool. So we can use these users after Pgpool initialize without even syncing them. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rm-misc-config + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + podTemplate: + spec: + containers: + - name: rabbitmq + env: + - name: "RABBITMQ_LOG_BASE" + value: '/var/log/cluster' + - name: "RABBITMQ_CONSOLE_LOG" + value: 'new' + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/rm-misc-config.yaml +pgpool.kubedb.com/pp-misc-config created +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `rm-misc-config-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo +NAME READY STATUS RESTARTS AGE +rm-misc-config-0 1/1 Running 0 68s +``` + +Now, check if the rabbitmq has started with the custom configuration we have provided. We will fetch log in the pod and see the `RABBITMQ_LOG_BASE`, the new log directory exists of not. + +```bash +$ kubectl exec -it -n demo -- bash + ## ## RabbitMQ 3.13.2 + ## ## + ########## Copyright (c) 2007-2024 Broadcom Inc and/or its subsidiaries + ###### ## + ########## Licensed under the MPL 2.0. Website: https://rabbitmq.com + + Erlang: 26.2.5 [jit] + TLS Library: OpenSSL - OpenSSL 3.1.5 30 Jan 2024 + Release series support status: supported + + Doc guides: https://www.rabbitmq.com/docs + Support: https://www.rabbitmq.com/docs/contact + Tutorials: https://www.rabbitmq.com/tutorials + Monitoring: https://www.rabbitmq.com/docs/monitoring + Upgrading: https://www.rabbitmq.com/docs/upgrade + + Logs: /var/log/rabbitmq/cluster/rabbit@rm-misc-config-0.rm-misc-config-pods.demo.log + +``` +So, we can see that that logs are being written to **Logs: /var/log/rabbitmq/cluster**/rabbit@rm-misc-config-0.rm-misc-config-pods.demo.log file. + +## Custom Sidecar Containers + +Here in this example we will add an extra sidecar container with our pgpool container. Suppose, you are running a KubeDB-managed rabbitmq, and you need to monitor the general logs. We can configure rabbitmq to write those logs in any directory, in the prior example we have configured rabbitmq to write logs to `/var/log/rabbitmq/cluster` directory. In order to export those logs to some remote monitoring solution (such as, Elasticsearch, Logstash, Kafka or Redis) will use a tool like [Filebeat](https://www.elastic.co/beats/filebeat). Filebeat is used to ship logs and files from devices, cloud, containers and hosts. So, it is required to run Filebeat as a sidecar container along with the KubeDB-managed rabbitmq. Here’s a quick demonstration on how to accomplish it. + +Firstly, we are going to make our custom filebeat image with our required configuration. 
+```yaml +filebeat.inputs: + - type: log + paths: + - /var/log/rabbitmq/cluster +output.console: + pretty: true +``` +Save this yaml with name `filebeat.yml`. Now prepare the dockerfile, +```dockerfile +FROM elastic/filebeat:7.17.1 +COPY filebeat.yml /usr/share/filebeat +USER root +RUN chmod go-w /usr/share/filebeat/filebeat.yml +USER filebeat +``` +Now run these following commands to build and push the docker image to your docker repository. +```bash +$ docker build -t repository_name/custom_filebeat:latest . +$ docker push repository_name/custom_filebeat:latest +``` +Now we will deploy our pgpool with custom sidecar container and will also use the `spec.initConfig` to configure the logs related settings. Here is the yaml of our pgpool: +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-custom-sidecar + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + podTemplate: + spec: + containers: + - name: pgpool + volumeMounts: + - mountPath: /var/log/rabbitmq/cluster + name: log + readOnly: false + - name: filebeat + image: repository_name/custom_filebeat:latest + volumeMounts: + - mountPath: /var/log/rabbitmq/cluster + name: log + readOnly: true + volumes: + - name: log + emptyDir: {} + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/rabbitmq-config-sidecar.yaml +rabbitmq.kubedb.com/rabbitmq-custom-sidecar created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `rabbitmq-custom-sidecar-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-custom-sidecar-0 2/2 Running 0 33s + +``` +Now, Let’s fetch the logs shipped to filebeat console output. The outputs will be generated in json format. + +```bash +$ kubectl logs -f -n demo rabbitmq-custom-sidecar-0 -c filebeat +``` +We will find the query logs in filebeat console output. +So, we have successfully extracted logs from rabbitmq to our sidecar filebeat container. + +## Using Node Selector + +Here in this example we will use [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) to schedule our pgpool pod to a specific node. Applying nodeSelector to the Pod involves several steps. We first need to assign a label to some node that will be later used by the `nodeSelector` . Let’s find what nodes exist in your cluster. 
To get the name of these nodes, you can run: + +```bash +$ kubectl get nodes --show-labels +NAME STATUS ROLES AGE VERSION LABELS +lke212553-307295-339173d10000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-339173d10000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=618158120a299c6fd37f00d01d355ca18794c467,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5541798e0000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5541798e0000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=75cfe3dbbb0380f1727efc53f5192897485e95d5,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5b53c5520000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5b53c5520000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=792bac078d7ce0e548163b9423416d7d8c88b08f,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +``` +As you see, we have three nodes in the cluster: lke212553-307295-339173d10000, lke212553-307295-5541798e0000, and lke212553-307295-5b53c5520000. + +Next, select a node to which you want to add a label. For example, let’s say we want to add a new label with the key `disktype` and value ssd to the `lke212553-307295-5541798e0000` node, which is a node with the SSD storage. To do so, run: +```bash +$ kubectl label nodes lke212553-307295-5541798e0000 disktype=ssd +node/lke212553-307295-5541798e0000 labeled +``` +As you noticed, the command above follows the format `kubectl label nodes =` . 
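Written out with placeholders, the general form of the command is:

```bash
kubectl label nodes <node-name> <label-key>=<label-value>
```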
+Finally, let’s verify that the new label was added by running: +```bash + $ kubectl get nodes --show-labels +NAME STATUS ROLES AGE VERSION LABELS +lke212553-307295-339173d10000 Ready 41m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-339173d10000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=618158120a299c6fd37f00d01d355ca18794c467,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5541798e0000 Ready 41m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,disktype=ssd,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5541798e0000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=75cfe3dbbb0380f1727efc53f5192897485e95d5,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5b53c5520000 Ready 41m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5b53c5520000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=792bac078d7ce0e548163b9423416d7d8c88b08f,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +``` +As you see, the lke212553-307295-5541798e0000 now has a new label disktype=ssd. To see all labels attached to the node, you can also run: +```bash +$ kubectl describe node "lke212553-307295-5541798e0000" +Name: lke212553-307295-5541798e0000 +Roles: +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/instance-type=g6-dedicated-4 + beta.kubernetes.io/os=linux + disktype=ssd + failure-domain.beta.kubernetes.io/region=ap-south + kubernetes.io/arch=amd64 + kubernetes.io/hostname=lke212553-307295-5541798e0000 + kubernetes.io/os=linux + lke.linode.com/pool-id=307295 + node.k8s.linode.com/host-uuid=75cfe3dbbb0380f1727efc53f5192897485e95d5 + node.kubernetes.io/instance-type=g6-dedicated-4 + topology.kubernetes.io/region=ap-south + topology.linode.com/region=ap-south +``` +Along with the `disktype=ssd` label we’ve just added, you can see other labels such as `beta.kubernetes.io/arch` or `kubernetes.io/hostname`. These are all default labels attached to Kubernetes nodes. + +Now let's create a pgpool with this new label as nodeSelector. Below is the yaml we are going to apply: +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-node-selector + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + podTemplate: + spec: + nodeSelector: + disktype: ssd + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/rabbitmq-node-selector.yaml +rabbitmq.kubedb.com/rabbitmq-node-selector created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. 
If everything goes well, we will see that a pod with the name `rabbitmq-node-selector-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pods -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-node-selector-0 1/1 Running 0 60s +``` +As we see the pod is running, you can verify that by running `kubectl get pods -n demo pgpool-node-selector-0 -o wide` and looking at the “NODE” to which the Pod was assigned. +```bash +$ kubectl get pods -n demo rabbitmq-node-selector-0 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +rabbitmq-node-selector-0 1/1 Running 0 3m19s 10.2.1.7 lke212553-307295-5541798e0000 +``` +We can successfully verify that our pod was scheduled to our desired node. + +## Using Taints and Tolerations + +Here in this example we will use [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to schedule our rabbitmq pod to a specific node and also prevent from scheduling to nodes. Applying taints and tolerations to the Pod involves several steps. Let’s find what nodes exist in your cluster. To get the name of these nodes, you can run: + +```bash +$ kubectl get nodes --show-labels +NAME STATUS ROLES AGE VERSION LABELS +lke212553-307295-339173d10000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-339173d10000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=618158120a299c6fd37f00d01d355ca18794c467,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5541798e0000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5541798e0000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=75cfe3dbbb0380f1727efc53f5192897485e95d5,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +lke212553-307295-5b53c5520000 Ready 36m v1.30.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=g6-dedicated-4,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=ap-south,kubernetes.io/arch=amd64,kubernetes.io/hostname=lke212553-307295-5b53c5520000,kubernetes.io/os=linux,lke.linode.com/pool-id=307295,node.k8s.linode.com/host-uuid=792bac078d7ce0e548163b9423416d7d8c88b08f,node.kubernetes.io/instance-type=g6-dedicated-4,topology.kubernetes.io/region=ap-south,topology.linode.com/region=ap-south +``` +As you see, we have three nodes in the cluster: lke212553-307295-339173d10000, lke212553-307295-5541798e0000, and lke212553-307295-5b53c5520000. + +Next, we are going to taint these nodes. 
+```bash +$ kubectl taint nodes lke212553-307295-339173d10000 key1=node1:NoSchedule +node/lke212553-307295-339173d10000 tainted + +$ kubectl taint nodes lke212553-307295-5541798e0000 key1=node2:NoSchedule +node/lke212553-307295-5541798e0000 tainted + +$ kubectl taint nodes lke212553-307295-5b53c5520000 key1=node3:NoSchedule +node/lke212553-307295-5b53c5520000 tainted +``` +Let's see our tainted nodes here, +```bash +$ kubectl get nodes -o json | jq -r '.items[] | select(.spec.taints != null) | .metadata.name, .spec.taints' +lke212553-307295-339173d10000 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node1" + } +] +lke212553-307295-5541798e0000 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node2" + } +] +lke212553-307295-5b53c5520000 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node3" + } +] +``` +We can see that our taints were successfully assigned. Now let's try to create a rabbitmq without proper tolerations. Here is the yaml of rabbitmq we are going to create - +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-without-tolerations + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/rabbitmq-without-tolerations.yaml +rabbitmq.kubedb.com/rabbitmq-without-tolerations created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `rabbitmq-without-tolerations-0` has been created and running. + +Check that the petset's pod is running or not, +```bash +$ kubectl get pods -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-without-tolerations-0 0/1 Pending 0 3m35s +``` +Here we can see that the pod is not running. 
So let's describe the pod, +```bash +$ kubectl describe pods -n demo rabbitmq-without-tolerations-0 +Name: rabbitmq-without-tolerations-0 +Namespace: demo +Priority: 0 +Service Account: default +Node: +Labels: app.kubernetes.io/component=connection-pooler + app.kubernetes.io/instance=rabbitmq-without-tolerations + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=pgpools.kubedb.com + apps.kubernetes.io/pod-index=0 + controller-revision-hash=pgpool-without-tolerations-5b85f9cd + statefulset.kubernetes.io/pod-name=rabbitmq-without-tolerations-0 +Annotations: +Status: Pending +IP: +IPs: +Controlled By: PetSet/rabbitmq-without-tolerations +Containers: + pgpool: + Image: ghcr.io/appscode-images/rabbitmq:3.13.2@sha256:7f2537e3dc69dae2cebea3500502e6a2b764b42911881e623195eeed32569217 + Ports: 9999/TCP, 9595/TCP + Host Ports: 0/TCP, 0/TCP + SeccompProfile: RuntimeDefault + Limits: + memory: 1Gi + Requests: + cpu: 500m + memory: 1Gi + Environment: + POSTGRES_USERNAME: postgres + POSTGRES_PASSWORD: 5ja8dHF79x4o6Ot6 + PGPOOL_PCP_USER: Optional: false + PGPOOL_PCP_PASSWORD: Optional: false + PGPOOL_PASSWORD_ENCRYPTION_METHOD: scram-sha-256 + PGPOOL_ENABLE_POOL_PASSWD: true + PGPOOL_SKIP_PASSWORD_ENCRYPTION: false + Mounts: + /config from pgpool-config (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-69qx2 (ro) +Conditions: + Type Status + PodScheduled False +Volumes: + pgpool-config: + Type: Secret (a volume populated by a Secret) + SecretName: pgpool-without-tolerations-config + Optional: false + kube-api-access-69qx2: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: Burstable +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Topology Spread Constraints: kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/component=connection-pooler,app.kubernetes.io/instance=pgpool-without-tolerations,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=pgpools.kubedb.com + topology.kubernetes.io/zone:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/component=connection-pooler,app.kubernetes.io/instance=pgpool-without-tolerations,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=pgpools.kubedb.com +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 5m20s default-scheduler 0/3 nodes are available: 1 node(s) had untolerated taint {key1: node1}, 1 node(s) had untolerated taint {key1: node2}, 1 node(s) had untolerated taint {key1: node3}. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling. + Warning FailedScheduling 11s default-scheduler 0/3 nodes are available: 1 node(s) had untolerated taint {key1: node1}, 1 node(s) had untolerated taint {key1: node2}, 1 node(s) had untolerated taint {key1: node3}. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling. + Normal NotTriggerScaleUp 13s (x31 over 5m15s) cluster-autoscaler pod didn't trigger scale-up: +``` +Here we can see that the pod has no tolerations for the tainted nodes and because of that the pod is not able to scheduled. + +So, let's add proper tolerations and create another rabbitmq. 
Here is the yaml we are going to apply, +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-with-tolerations + namespace: demo +spec: + version: "3.13.2" + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + deletionPolicy: WipeOut +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/configuration/pgpool-with-tolerations.yaml +pgpool.kubedb.com/rabbitmq-with-tolerations created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `rabbitmq-with-tolerations-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pods -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-with-tolerations-0 1/1 Running 0 2m +``` +As we see the pod is running, you can verify that by running `kubectl get pods -n demo rabbitmq-with-tolerations-0 -o wide` and looking at the “NODE” to which the Pod was assigned. +```bash +$ kubectl get pods -n demo rabbitmq-with-tolerations-0 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +rabbitmq-with-tolerations-0 1/1 Running 0 3m49s 10.2.0.8 lke212553-307295-339173d10000 +``` +We can successfully verify that our pod was scheduled to the node which it has tolerations. + +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete -n demo rm rm-misc-config rabbitmq-custom-sidecar rabbitmq-node-selector rabbitmq-with-tolerations rabbitmq-without-tolerations +kubectl delete ns demo +``` + +If you would like to uninstall KubeDB operator, please follow the steps [here](/docs/setup/README.md). + +## Next Steps + +- [Quickstart rabbitmq](/docs/guides/rabbitmq/quickstart/quickstart.md) with KubeDB Operator. +- Monitor your rabbitmq database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). +- Monitor your rabbitmq database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [rabbitmq object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/rabbitmq/images/reconfigure.svg b/docs/guides/rabbitmq/images/reconfigure.svg new file mode 100644 index 0000000000..258f9a91e6 --- /dev/null +++ b/docs/guides/rabbitmq/images/reconfigure.svg @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/guides/rabbitmq/monitoring/overview.md b/docs/guides/rabbitmq/monitoring/overview.md index 6b255cf59d..404e5f6975 100644 --- a/docs/guides/rabbitmq/monitoring/overview.md +++ b/docs/guides/rabbitmq/monitoring/overview.md @@ -3,7 +3,7 @@ title: RabbitMQ Monitoring Overview description: RabbitMQ Monitoring Overview menu: docs_{{ .version }}: - identifier: mg-monitoring-overview + identifier: rm-monitoring-overview name: Overview parent: rm-monitoring-guides weight: 10 @@ -50,16 +50,16 @@ A sample YAML for RabbitMQ crd with `spec.monitor` section configured to enable apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: sample-mongo - namespace: databases + name: sample-rabbitmq + namespace: demo spec: - version: "4.4.26" - terminationPolicy: WipeOut + version: "3.13.2" + deletionPolicy: WipeOut configSecret: name: config storageType: Durable storage: - storageClassName: default + storageClassName: standard accessModes: - ReadWriteOnce resources: @@ -71,35 +71,13 @@ spec: serviceMonitor: labels: release: prometheus - exporter: - args: - - --collect.database - env: - - name: ENV_VARIABLE - valueFrom: - secretKeyRef: - name: env_name - key: env_value - resources: - requests: - memory: 512Mi - cpu: 200m - limits: - memory: 512Mi - cpu: 250m - securityContext: - runAsUser: 2000 - allowPrivilegeEscalation: false ``` Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in databases namespace and this `ServiceMonitor` will have `release: prometheus` label. -One thing to note that, we internally use `--collect-all` args, if the RabbitMQ exporter version >= v0.31.0 . You can check the exporter version by getting the mgversion object, like this, -`kubectl get mgversion -o=jsonpath='{.spec.exporter.image}' 4.4.26` -In that case, specifying args to collect something (as we used `--collect.database` above) will not have any effect. ## Next Steps -- Learn how to monitor RabbitMQ database with KubeDB using [builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md) -- Learn how to monitor RabbitMQ database with KubeDB using [Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). +- Learn how to monitor RabbitMQ database with KubeDB using [builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md) +- Learn how to monitor RabbitMQ database with KubeDB using [Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). 
diff --git a/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md b/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md index c833a52b53..bb6acf1520 100644 --- a/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md +++ b/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md @@ -36,7 +36,7 @@ This tutorial will show you how to monitor RabbitMQ database using builtin [Prom namespace/demo created ``` -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). +> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Deploy RabbitMQ with Monitoring Enabled @@ -50,7 +50,7 @@ metadata: namespace: demo spec: version: "3.13.2" - terminationPolicy: WipeOut + deletionPolicy: WipeOut storage: storageClassName: "standard" accessModes: @@ -69,16 +69,16 @@ Here, Let's create the RabbitMQ crd we have shown above. ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/monitoring/builtin-prom-mgo.yaml -rabbitmq.kubedb.com/builtin-prom-mgo created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/monitoring/builtin-prom-rm.yaml +rabbitmq.kubedb.com/builtin-prom-rm created ``` Now, wait for the database to go into `Running` state. ```bash -$ kubectl get mg -n demo builtin-prom-mgo +$ kubectl get rm -n demo builtin-prom-rm NAME VERSION STATUS AGE -builtin-prom-mgo 4.4.26 Ready 2m34s +builtin-prom-rm 3.13.2 Ready 2m34s ``` KubeDB will create a separate stats service with name `{RabbitMQ crd name}-stats` for monitoring purpose. @@ -91,7 +91,7 @@ builtin-rabbitmq ClusterIP None 27017/TCP 55 builtin-rabbitmq ClusterIP 10.98.202.26 56790/TCP 36s ``` -Here, `builtin-prom-mgo-stats` service has been created for monitoring purpose. Let's describe the service. +Here, `builtin-prom-rm-stats` service has been created for monitoring purpose. Let's describe the service. ```bash $ kubectl describe svc -n demo builtin-rabbitmq-stats @@ -325,11 +325,7 @@ Forwarding from [::1]:9090 -> 9090 Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see the endpoint of `builtin-prom-mgo-stats` service as one of the targets. -
-  [figure removed: Prometheus Target]
- -Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `RabbitMQ` database `builtin-prom-mgo` through stats service `builtin-prom-mgo-stats`. +Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `RabbitMQ` database `builtin-prom-rm` through stats service `builtin-prom-rm-stats`. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. @@ -352,8 +348,6 @@ kubectl delete ns monitoring ## Next Steps -- Learn about [backup and restore](/docs/guides/RabbitMQ/backup/overview/index.md) RabbitMQ database using Stash. -- Learn how to configure [RabbitMQ Topology](/docs/guides/RabbitMQ/clustering/sharding.md). -- Monitor your RabbitMQ database with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. +- Monitor your RabbitMQ database with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). + - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md b/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md index b17f9e4e69..096d3aadb4 100644 --- a/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md +++ b/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md @@ -20,7 +20,7 @@ section_menu_id: guides - At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). -- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/RabbitMQ/monitoring/overview.md). +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/rabbitmq/monitoring/overview.md). - We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, you can deploy one using this helm chart [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). @@ -36,7 +36,7 @@ section_menu_id: guides -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). +> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Find out required labels for ServiceMonitor @@ -135,11 +135,11 @@ At first, let's deploy an RabbitMQ database with monitoring enabled. 
Below is th apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: coreos-prom-mgo + name: prom-rm namespace: demo spec: - version: "4.4.26" - terminationPolicy: WipeOut + version: "3.13.2" + deletionPolicy: WipeOut storage: storageClassName: "standard" accessModes: @@ -165,43 +165,43 @@ Here, Let's create the RabbitMQ object that we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/monitoring/coreos-prom-mgo.yaml -RabbitMQ.kubedb.com/coreos-prom-mgo created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/monitoring/prom-rm.yaml +rabbitmq.kubedb.com/prom-rm created ``` Now, wait for the database to go into `Running` state. ```bash -$ kubectl get mg -n demo coreos-prom-mgo -NAME VERSION STATUS AGE -coreos-prom-mgo 4.4.26 Ready 34s +$ kubectl get mg -n demo prom-rm +NAME VERSION STATUS AGE +prom-rm 3.13.2 Ready 34s ``` KubeDB will create a separate stats service with name `{RabbitMQ crd name}-stats` for monitoring purpose. ```bash -$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=coreos-prom-mgo" +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=prom-rm" NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -coreos-prom-mgo ClusterIP 10.96.150.171 27017/TCP 84s -coreos-prom-mgo-pods ClusterIP None 27017/TCP 84s -coreos-prom-mgo-stats ClusterIP 10.96.218.41 56790/TCP 64s +prom-rm ClusterIP 10.96.150.171 27017/TCP 84s +prom-rm-pods ClusterIP None 27017/TCP 84s +prom-rm-stats ClusterIP 10.96.218.41 56790/TCP 64s ``` -Here, `coreos-prom-mgo-stats` service has been created for monitoring purpose. +Here, `prom-rm-stats` service has been created for monitoring purpose. Let's describe this stats service. ```yaml -$ kubectl describe svc -n demo coreos-prom-mgo-stats -Name: coreos-prom-mgo-stats +$ kubectl describe svc -n demo prom-rm-stats +Name: prom-rm-stats Namespace: demo Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=coreos-prom-mgo + app.kubernetes.io/instance=prom-rm app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com + app.kubernetes.io/name=rabbitmqs.kubedb.com kubedb.com/role=stats Annotations: monitoring.appscode.com/agent: prometheus.io/operator -Selector: app.kubernetes.io/instance=coreos-prom-mgo,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=RabbitMQs.kubedb.com +Selector: app.kubernetes.io/instance=prom-rm,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=rabbitmqs.kubedb.com Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 @@ -217,18 +217,18 @@ Events: Notice the `Labels` and `Port` fields. `ServiceMonitor` will use this information to target its endpoints. -KubeDB will also create a `ServiceMonitor` crd in `demo` namespace that select the endpoints of `coreos-prom-mgo-stats` service. Verify that the `ServiceMonitor` crd has been created. +KubeDB will also create a `ServiceMonitor` crd in `demo` namespace that select the endpoints of `prom-rm-stats` service. Verify that the `ServiceMonitor` crd has been created. ```bash $ kubectl get servicemonitor -n demo NAME AGE -coreos-prom-mgo-stats 2m40s +prom-rm-stats 2m40s ``` Let's verify that the `ServiceMonitor` has the label that we had specified in `spec.monitor` section of RabbitMQ crd. 
```yaml -$ kubectl get servicemonitor -n demo coreos-prom-mgo-stats -o yaml +$ kubectl get servicemonitor -n demo prom-rm-stats -o yaml apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -236,18 +236,18 @@ metadata: generation: 1 labels: app.kubernetes.io/component: database - app.kubernetes.io/instance: coreos-prom-mgo + app.kubernetes.io/instance: prom-rm app.kubernetes.io/managed-by: kubedb.com - app.kubernetes.io/name: RabbitMQs.kubedb.com + app.kubernetes.io/name: rabbitmqs.kubedb.com release: prometheus - name: coreos-prom-mgo-stats + name: prom-rm-stats namespace: demo ownerReferences: - apiVersion: v1 blockOwnerDeletion: true controller: true kind: Service - name: coreos-prom-mgo-stats + name: prom-rm-stats uid: 68b0e8c4-cba4-4dcb-9016-4e1901ca1fd0 resourceVersion: "528373" uid: 56eb596b-d2cf-4d2c-a204-c43dbe8fe896 @@ -265,15 +265,15 @@ spec: selector: matchLabels: app.kubernetes.io/component: database - app.kubernetes.io/instance: coreos-prom-mgo + app.kubernetes.io/instance: prom-rm app.kubernetes.io/managed-by: kubedb.com - app.kubernetes.io/name: RabbitMQs.kubedb.com + app.kubernetes.io/name: rabbitmqs.kubedb.com kubedb.com/role: stats ``` Notice that the `ServiceMonitor` has label `release: prometheus` that we had specified in RabbitMQ crd. -Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `coreos-prom-mgo-stats` service. It also, target the `metrics` port that we have seen in the stats service. +Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `prom-rm-stats` service. It also, target the `metrics` port that we have seen in the stats service. ## Verify Monitoring Metrics @@ -295,11 +295,7 @@ Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 ``` -Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see `metrics` endpoint of `coreos-prom-mgo-stats` service as one of the targets. - -
-  [figure removed: Prometheus Target]
+Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see `metrics` endpoint of `prom-rm-stats` service as one of the targets. Check the `endpoint` and `service` labels marked by the red rectangles. It verifies that the target is our expected database. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create a beautiful dashboard with collected metrics. @@ -308,16 +304,14 @@ Check the `endpoint` and `service` labels marked by the red rectangles. It verif To cleanup the Kubernetes resources created by this tutorial, run following commands ```bash -kubectl delete -n demo mg/coreos-prom-mgo +kubectl delete -n demo rm/prom-rm kubectl delete ns demo ``` ## Next Steps -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Detail concepts of [RabbitMQVersion object](/docs/guides/RabbitMQ/concepts/catalog.md). -- [Backup and Restore](/docs/guides/RabbitMQ/backup/overview/index.md) process of RabbitMQ databases using Stash. -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Detail concepts of [RabbitMQVersion object](/docs/guides/rabbitmq/concepts/catalog.md). + - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/quickstart/_index.md b/docs/guides/rabbitmq/quickstart/_index.md index bb3cb786c0..5fa8492b90 100644 --- a/docs/guides/rabbitmq/quickstart/_index.md +++ b/docs/guides/rabbitmq/quickstart/_index.md @@ -2,9 +2,9 @@ title: RabbitMQ Quickstart menu: docs_{{ .version }}: - identifier: rm-quickstart-rabbitmq + identifier: rm-quickstart name: Quickstart - parent: guides-rabbitmq + parent: rm-guides weight: 10 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/quickstart/quickstart.md b/docs/guides/rabbitmq/quickstart/quickstart.md index 5baae6f917..23b731d364 100644 --- a/docs/guides/rabbitmq/quickstart/quickstart.md +++ b/docs/guides/rabbitmq/quickstart/quickstart.md @@ -2,9 +2,9 @@ title: RabbitMQ Quickstart menu: docs_{{ .version }}: - identifier: rm-quickstart-quickstart + identifier: rm-quickstart-description name: Overview - parent: rm-quickstart-rabbitmq + parent: rm-quickstart weight: 15 menu_name: docs_{{ .version }} section_menu_id: guides @@ -64,7 +64,7 @@ metadata: name: rm-quickstart namespace: demo spec: - version: "3.12.12" + version: "3.13.2" replicas: 3 storage: accessModes: @@ -99,15 +99,15 @@ rabbitmq.kubedb.com/rm-quickstart created Here, - `.spec.replica` is used to provide the number of required replicas or, peers for intended rabbitmq cluster. -- `spec.version` is the name of the RabbitMQVersion CRD where the docker images are specified. In this tutorial, a RabbitMQ `3.12.12` database is going to be created. 
+- `spec.version` is the name of the RabbitMQVersion CRD where the docker images are specified. In this tutorial, a RabbitMQ `3.13.2` database is going to be created. - `spec.storageType` specifies the type of storage that will be used for RabbitMQ database. It can be `Durable` or `Ephemeral`. Default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create RabbitMQ database using `EmptyDir` volume. In this case, you don't have to specify `spec.storage` field. This is useful for testing purposes. -- `spec.deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `RabbitMQ` CRD or which resources KubeDB should keep or delete when you delete `RabbitMQ` CRD. If admission webhook is enabled, It prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. Learn details of all `DeletionPolicy` [here](/docs/guides/mysql/concepts/database/index.md#specdeletionpolicy) + - `spec.deletionPolicy` gives flexibility on whether to `nullify` (reject) the delete operation of the `RabbitMQ` CRD, or which resources KubeDB should keep or delete when you delete the `RabbitMQ` CRD. If the admission webhook is enabled, it prevents users from deleting the database as long as `spec.deletionPolicy` is set to `DoNotTerminate`. Learn details of all `DeletionPolicy` options [here](/docs/guides/rabbitmq/concepts/rabbitmq.md#specdeletionpolicy) - `.spec.podTemplate` is used to provide specific pod specifications or container specification. You can override default resources, securityContext etc. set for rabbitmq container. Find details [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec) - `spec.serviceTemplates` is used to provide template for the services created by KubeDB operator for RabbitMQ database. This will allow you to set the type and other properties of the services. > Note: `spec.storage` section is used to create PVC for database pod. It will create PVC with storage size specified in `storage.resources.requests` field. Don't specify limits here. PVC does not get resized automatically. -KubeDB operator watches for `RabbitMQ` objects using Kubernetes api. When a `RabbitMQ` object is created, KubeDB provisioner operator will create new PetSet (aka PetSet 2.0), Service (Primary) with the matching RabbitMQ object name and Required secrets for cluster communication and authentication if not present. KubeDB operator will also create an AppBinding resource and governing service for PetSets, if one is not already present. `AppBinding` is a Kubernetes `CustomResourceDefinition`(CRD) which points to an application using either its URL (usually for a non-Kubernetes resident service instance) or a Kubernetes service object (if self-hosted in a Kubernetes cluster), some optional parameters and a credential secret. +KubeDB operator watches for `RabbitMQ` objects using the Kubernetes API. When a `RabbitMQ` object is created, the KubeDB provisioner operator will create a new PetSet (aka StatefulSet 2.0), Services with the matching RabbitMQ object name, and the required secrets for cluster communication and authentication if they are not already present. The services include a primary service for client communication over AMQP, MQTT, STOMP, or WebSocket, a governing service for inter-node cluster governance, a dashboard service for connecting to the management UI and interacting with its HTTP endpoints, and a stats service that exposes the metrics endpoint if monitoring is enabled. KubeDB operator will also create an AppBinding resource.
`AppBinding` is a Kubernetes `CustomResourceDefinition`(CRD) which points to an application using either its URL (usually for a non-Kubernetes resident service instance) or a Kubernetes service object (if self-hosted in a Kubernetes cluster), some optional parameters and a credential secret. ```bash $ kubectl get petset -n demo @@ -127,13 +127,14 @@ pvc-c94bd3d0-8fa7-4794-9221-8295bc3e7b38 1Gi RWO Delete pvc-ddfd1987-c8b2-4c72-90ad-a8361ed4de56 1Gi RWO Delete Bound demo/rm-quickstart-data-rm-quickstart-2 standard 6m52s $ kubectl get service -n demo -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -rm-quickstart LoadBalancer 10.96.120.188 15672:30802/TCP,5672:32684/TCP 8m49s -rm-quickstart-pods ClusterIP None 4369/TCP,25672/TCP 8m49s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +rm-quickstart LoadBalancer 10.128.221.60 172.232.241.73 5672:31803/TCP,1883:31938/TCP,61613:31884/TCP,15675:32567/TCP,15674:32599/TCP 8m59s +rm-quickstart-dashboard ClusterIP 10.128.240.53 15672/TCP 8m58s +rm-quickstart-pods ClusterIP None 4369/TCP,25672/TCP 8m59s $ kubectl get appbinding -n demo NAME TYPE VERSION AGE -rm-quickstart kubedb.com/rabbitmq 3.12.12 23h +rm-quickstart kubedb.com/rabbitmq 3.13.2 23h ``` KubeDB operator sets the `status.phase` to `Running` once the database is successfully created. Run the following command to see the modified `RabbitMQ` object: @@ -147,117 +148,120 @@ kind: RabbitMQ metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"kubedb.com/v1alpha2","kind":"RabbitMQ","metadata":{"annotations":{},"name":"rm-quickstart","namespace":"demo"},"spec":{"podTemplate":{"spec":{"containers":[{"name":"rabbitmq","resources":{"limits":{"cpu":"2","memory":"2Gi"},"requests":{"cpu":"0.5","memory":"1Gi"}}}]}},"replicas":3,"serviceTemplates":[{"alias":"primary","spec":{"type":"LoadBalancer"}}],"storage":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}},"storageClassName":"standard"},"storageType":"Durable","deletionPolicy":"WipeOut","version":"3.12.12"}} - creationTimestamp: "2024-05-07T10:25:35Z" + {"apiVersion":"kubedb.com/v1alpha2","kind":"RabbitMQ","metadata":{"annotations":{},"name":"rm-quickstart","namespace":"demo"},"spec":{"deletionPolicy":"WipeOut","podTemplate":{"spec":{"containers":[{"name":"rabbitmq","resources":{"limits":{"cpu":2,"memory":"2Gi"},"requests":{"cpu":0.5,"memory":"1Gi"}}}]}},"replicas":3,"serviceTemplates":[{"alias":"primary","spec":{"type":"LoadBalancer"}}],"storage":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}},"storageClassName":"linode-block-storage"},"storageType":"Durable","version":"3.13.2"}} + creationTimestamp: "2024-09-10T09:23:57Z" finalizers: - - kubedb.com/rabbitmq + - kubedb.com/rabbitmq generation: 3 name: rm-quickstart namespace: demo - resourceVersion: "390056" - uid: 37dd5c9f-2df3-492e-a828-309abf580cc6 + resourceVersion: "58864" + uid: f3a948e4-b5c3-4327-b65e-b170fd744e89 spec: authSecret: name: rm-quickstart-admin-cred + deletionPolicy: WipeOut healthChecker: failureThreshold: 3 - periodSeconds: 20 + periodSeconds: 10 timeoutSeconds: 10 podTemplate: controller: {} metadata: {} spec: containers: - - name: rabbitmq - resources: - limits: - cpu: "2" - memory: 2Gi - requests: - cpu: 500m - memory: 1Gi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsNonRoot: true - runAsUser: 999 - seccompProfile: - type: RuntimeDefault + - name: rabbitmq + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + 
securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault initContainers: - - name: rabbitmq-init - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsNonRoot: true - runAsUser: 999 - seccompProfile: - type: RuntimeDefault + - name: rabbitmq-init + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault + podPlacementPolicy: + name: default securityContext: fsGroup: 999 replicas: 3 serviceTemplates: - - alias: primary - metadata: {} - spec: - type: LoadBalancer + - alias: primary + metadata: {} + spec: + type: LoadBalancer storage: accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: storage: 1Gi storageClassName: standard storageType: Durable - deletionPolicy: WipeOut - version: 3.12.12 + version: 3.13.2 status: conditions: - - lastTransitionTime: "2024-05-07T10:25:36Z" - message: 'The KubeDB operator has started the provisioning of Rabbitmq: demo/rm-quickstart' - observedGeneration: 2 - reason: DatabaseProvisioningStartedSuccessfully - status: "True" - type: ProvisioningStarted - - lastTransitionTime: "2024-05-07T10:35:34Z" - message: All desired replicas are ready. - reason: AllReplicasReady - status: "True" - type: ReplicaReady - - lastTransitionTime: "2024-05-07T10:33:20Z" - message: 'The Rabbitmq: demo/rm-quickstart is accepting client requests' - observedGeneration: 3 - reason: DatabaseAcceptingConnectionRequest - status: "True" - type: AcceptingConnection - - lastTransitionTime: "2024-05-07T10:26:20Z" - message: Ready for publishing messages - observedGeneration: 3 - reason: Successfully publishing messages - status: "True" - type: DatabaseWriteAccess - - lastTransitionTime: "2024-05-07T10:33:20Z" - message: 'The Rabbitmq: demo/rm-quickstart is ready.' - observedGeneration: 3 - reason: ReadinessCheckSucceeded - status: "True" - type: Ready - - lastTransitionTime: "2024-05-07T10:26:24Z" - message: 'The Rabbitmq: demo/rm-quickstart is successfully provisioned.' - observedGeneration: 3 - reason: DatabaseSuccessfullyProvisioned - status: "True" - type: Provisioned - - lastTransitionTime: "2024-05-07T10:26:40Z" - message: Ready for Consuming messages - observedGeneration: 3 - reason: Successfully Consuming messages - status: "True" - type: DatabaseReadAccess + - lastTransitionTime: "2024-09-10T09:23:57Z" + message: 'The KubeDB operator has started the provisioning of Rabbitmq: demo/rm-quickstart' + observedGeneration: 2 + reason: DatabaseProvisioningStartedSuccessfully + status: "True" + type: ProvisioningStarted + - lastTransitionTime: "2024-09-10T09:32:52Z" + message: All replicas are ready + observedGeneration: 3 + reason: AllReplicasReady + status: "True" + type: ReplicaReady + - lastTransitionTime: "2024-09-10T09:25:46Z" + message: 'The Rabbitmq: demo/rm-quickstart is accepting client requests' + observedGeneration: 3 + reason: DatabaseAcceptingConnectionRequest + status: "True" + type: AcceptingConnection + - lastTransitionTime: "2024-09-10T09:25:46Z" + message: Ready for publishing messages + observedGeneration: 3 + reason: Successfully publishing messages + status: "True" + type: DatabaseWriteAccess + - lastTransitionTime: "2024-09-10T09:25:46Z" + message: 'The Rabbitmq: demo/rm-quickstart is ready.' 
+ observedGeneration: 3 + reason: ReadinessCheckSucceeded + status: "True" + type: Ready + - lastTransitionTime: "2024-09-10T09:25:47Z" + message: 'The Rabbitmq: demo/rm-quickstart is successfully provisioned.' + observedGeneration: 3 + reason: DatabaseSuccessfullyProvisioned + status: "True" + type: Provisioned + - lastTransitionTime: "2024-09-10T09:25:56Z" + message: Ready for Consuming messages + observedGeneration: 3 + reason: Successfully Consuming messages + status: "True" + type: DatabaseReadAccess phase: Ready ``` @@ -278,7 +282,7 @@ password We can check client connectivity using an opensource load-testing tool called `perf-test`. It runs producers and consumers to continuously publish and consume messages in RabbitMQ cluster. Here's how to run it on kubernetes using the credentials and the address for operator generated primary service. ```bash -kubectl run perf-test --image=pivotalrabbitmq/perf-test -- --uri "amqp://admin:password@rm-quickstart.demo.svc:5672" +kubectl run perf-test --image=pivotalrabbitmq/perf-test -- --uri "amqp://admin:password@rm-quickstart.demo.svc:5672/" ``` You can check the log for this pod which shows publish and consume rates of messages in RabbitMQ. @@ -301,10 +305,10 @@ id: test-104606-706, time 9.000 s, sent: 29706 msg/s, received: 31375 msg/s, min id: test-104606-706, time 10.000 s, sent: 15903 msg/s, received: 26711 msg/s, min/median/75th/95th/99th consumer latency: 1569546/1884700/1992762/2096417/2136613 µs ``` -You can also connect with the RabbitMQ Management UI. It can be accessed through Primary service's 15672 Port or from a localhost port if the port is forwarded. +You can also connect with the RabbitMQ Management UI. It can be accessed through Dashboard service's 15672 Port or from a localhost port if the port is forwarded. ```bash -$ kubectl port-forward -n demo svc/rm-quickstart 15672 +$ kubectl port-forward -n demo svc/rm-quickstart-dashboard 15672 Forwarding from 127.0.0.1:15672 -> 15672 Forwarding from [::1]:15672 -> 15672 ``` @@ -381,7 +385,7 @@ Now, run the following command to get all RabbitMQ resources in `demo` namespace ```bash $ kubectl get petset,svc,secret,pvc -n demo NAME TYPE DATA AGE -secret/rm-quickstart-root-cred kubernetes.io/basic-auth 2 17m +secret/rm-quickstart-admin-cred kubernetes.io/basic-auth 2 17m ``` From the above output, you can see that all RabbitMQ resources(`PetSet`, `Service`, `PVCs` etc.) are deleted except `Secret`. 
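Since the credential secret is intentionally left behind by the delete operation shown above, you may want a final sweep once you are completely done with the demo. A minimal sketch, assuming the default secret name shown above and that nothing else in the `demo` namespace needs to be kept:

```bash
# Remove the auth secret that survives database deletion, then drop the whole namespace.
kubectl delete secret -n demo rm-quickstart-admin-cred
kubectl delete namespace demo
```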
diff --git a/docs/guides/rabbitmq/reconfigure-tls/_index.md b/docs/guides/rabbitmq/reconfigure-tls/_index.md index 3e9d2e14a1..2fc9ee6339 100644 --- a/docs/guides/rabbitmq/reconfigure-tls/_index.md +++ b/docs/guides/rabbitmq/reconfigure-tls/_index.md @@ -2,9 +2,9 @@ title: Reconfigure RabbitMQ TLS/SSL menu: docs_{{ .version }}: - identifier: mg-reconfigure-tls + identifier: rm-reconfigure-tls name: Reconfigure TLS/SSL - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 46 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/reconfigure-tls/overview.md b/docs/guides/rabbitmq/reconfigure-tls/overview.md index d9736e40da..dfec034246 100644 --- a/docs/guides/rabbitmq/reconfigure-tls/overview.md +++ b/docs/guides/rabbitmq/reconfigure-tls/overview.md @@ -1,10 +1,10 @@ --- -title: Reconfiguring TLS of RabbitMQ Database +title: Reconfiguring TLS of RabbitMQ menu: docs_{{ .version }}: - identifier: mg-reconfigure-tls-overview + identifier: rm-reconfigure-tls-overview name: Overview - parent: mg-reconfigure-tls + parent: rm-reconfigure-tls weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -19,18 +19,13 @@ This guide will give an overview on how KubeDB Ops-manager operator reconfigures ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Reconfiguring RabbitMQ TLS Configuration Process Works The following diagram shows how KubeDB Ops-manager operator reconfigures TLS of a `RabbitMQ` database. Open the image in a new tab to see the enlarged version. -
-  [figure removed: Fig: Reconfiguring TLS process of RabbitMQ]
- The Reconfiguring RabbitMQ TLS process consists of the following steps: 1. At first, a user creates a `RabbitMQ` Custom Resource Object (CRO). diff --git a/docs/guides/rabbitmq/reconfigure-tls/reconfigure-tls.md b/docs/guides/rabbitmq/reconfigure-tls/reconfigure-tls.md index 8bea83e026..1eef398986 100644 --- a/docs/guides/rabbitmq/reconfigure-tls/reconfigure-tls.md +++ b/docs/guides/rabbitmq/reconfigure-tls/reconfigure-tls.md @@ -2,9 +2,9 @@ title: Reconfigure RabbitMQ TLS/SSL Encryption menu: docs_{{ .version }}: - identifier: mg-reconfigure-tls-rs + identifier: rm-reconfigure-tls-ops name: Reconfigure RabbitMQ TLS/SSL Encryption - parent: mg-reconfigure-tls + parent: rm-reconfigure-tls weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -31,7 +31,7 @@ KubeDB supports reconfigure i.e. add, remove, update and rotation of TLS/SSL cer namespace/demo created ``` -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). +> Note: YAML files used in this tutorial are stored in [docs/examples/rabbitmq](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Add TLS to a RabbitMQ database @@ -45,13 +45,11 @@ In this section, we are going to deploy a RabbitMQ Replicaset database without T apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: mg-rs + name: rm namespace: demo spec: - version: "4.4.26" + version: "3.13.2" replicas: 3 - replicaSet: - name: rs0 storage: storageClassName: "standard" accessModes: @@ -64,160 +62,25 @@ spec: Let's create the `RabbitMQ` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/mg-replicaset.yaml -RabbitMQ.kubedb.com/mg-rs created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/rm.yaml +rabbitmq.kubedb.com/rm created ``` -Now, wait until `mg-replicaset` has status `Ready`. i.e, +Now, wait until `rm` has status `Ready`. i.e, ```bash -$ kubectl get mg -n demo +$ kubectl get rm -n demo NAME VERSION STATUS AGE -mg-rs 4.4.26 Ready 10m - -$ kubectl dba describe RabbitMQ mg-rs -n demo -Name: mg-rs -Namespace: demo -CreationTimestamp: Thu, 11 Mar 2021 13:25:05 +0600 -Labels: -Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"kubedb.com/v1alpha2","kind":"RabbitMQ","metadata":{"annotations":{},"name":"mg-rs","namespace":"demo"},"spec":{"replicaSet":{"name":"rs0"... 
-Replicas: 3 total -Status: Ready -StorageType: Durable -Volume: - StorageClass: standard - Capacity: 1Gi - Access Modes: RWO -Paused: false -Halted: false -Termination Policy: Delete - -StatefulSet: - Name: mg-rs - CreationTimestamp: Thu, 11 Mar 2021 13:25:05 +0600 - Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-rs - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com - Annotations: - Replicas: 824639275080 desired | 3 total - Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed - -Service: - Name: mg-rs - Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-rs - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com - Annotations: - Type: ClusterIP - IP: 10.96.70.27 - Port: primary 27017/TCP - TargetPort: db/TCP - Endpoints: 10.244.0.63:27017 - -Service: - Name: mg-rs-pods - Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-rs - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com - Annotations: - Type: ClusterIP - IP: None - Port: db 27017/TCP - TargetPort: db/TCP - Endpoints: 10.244.0.63:27017,10.244.0.65:27017,10.244.0.67:27017 - -Auth Secret: - Name: mg-rs-auth - Labels: app.kubernetes.io/component=database - app.kubernetes.io/instance=mg-rs - app.kubernetes.io/managed-by=kubedb.com - app.kubernetes.io/name=RabbitMQs.kubedb.com - Annotations: - Type: Opaque - Data: - password: 16 bytes - username: 4 bytes - -AppBinding: - Metadata: - Annotations: - kubectl.kubernetes.io/last-applied-configuration: {"apiVersion":"kubedb.com/v1alpha2","kind":"RabbitMQ","metadata":{"annotations":{},"name":"mg-rs","namespace":"demo"},"spec":{"replicaSet":{"name":"rs0"},"replicas":3,"storage":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}},"storageClassName":"standard"},"version":"4.4.26"}} - - Creation Timestamp: 2021-03-11T07:26:44Z - Labels: - app.kubernetes.io/component: database - app.kubernetes.io/instance: mg-rs - app.kubernetes.io/managed-by: kubedb.com - app.kubernetes.io/name: RabbitMQs.kubedb.com - Name: mg-rs - Namespace: demo - Spec: - Client Config: - Service: - Name: mg-rs - Port: 27017 - Scheme: RabbitMQ - Parameters: - API Version: config.kubedb.com/v1alpha1 - Kind: MongoConfiguration - Replica Sets: - host-0: rs0/mg-rs-0.mg-rs-pods.demo.svc,mg-rs-1.mg-rs-pods.demo.svc,mg-rs-2.mg-rs-pods.demo.svc - Stash: - Addon: - Backup Task: - Name: RabbitMQ-backup-4.4.6-v6 - Restore Task: - Name: RabbitMQ-restore-4.4.6-v6 - Secret: - Name: mg-rs-auth - Type: kubedb.com/RabbitMQ - Version: 4.4.26 - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Successful 14m RabbitMQ operator Successfully created stats service - Normal Successful 14m RabbitMQ operator Successfully created Service - Normal Successful 14m RabbitMQ operator Successfully stats service - Normal Successful 14m RabbitMQ operator Successfully stats service - Normal Successful 13m RabbitMQ operator Successfully stats service - Normal Successful 13m RabbitMQ operator Successfully stats service - Normal Successful 13m RabbitMQ operator Successfully stats service - Normal Successful 13m RabbitMQ operator Successfully stats service - Normal Successful 13m RabbitMQ operator Successfully stats service - Normal Successful 12m RabbitMQ operator Successfully stats service - Normal Successful 12m RabbitMQ operator Successfully patched StatefulSet demo/mg-rs -``` - -Now, we can connect to this database 
through [mongo-shell](https://docs.RabbitMQ.com/v4.2/mongo/) and verify that the TLS is disabled. +rm 3.13.2 Ready 10m ```bash -$ kubectl get secrets -n demo mg-rs-auth -o jsonpath='{.data.\username}' | base64 -d +$ kubectl get secrets -n demo rm-admin-cred -o jsonpath='{.data.\username}' | base64 -d root -$ kubectl get secrets -n demo mg-rs-auth -o jsonpath='{.data.\password}' | base64 -d +$ kubectl get secrets -n demo rm-admin-cred -o jsonpath='{.data.\password}' | base64 -d U6(h_pYrekLZ2OOd -$ kubectl exec -it mg-rs-0 -n demo -- mongo admin -u root -p 'U6(h_pYrekLZ2OOd' -rs0:PRIMARY> db.adminCommand({ getParameter:1, sslMode:1 }) -{ - "sslMode" : "disabled", - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1615468344, 1), - "signature" : { - "hash" : BinData(0,"Xdclj9Y67WKZ/oTDGT/E1XzOY28="), - "keyId" : NumberLong("6938294279689207810") - } - }, - "operationTime" : Timestamp(1615468344, 1) -} -``` - We can verify from the above output that TLS is disabled for this database. ### Create Issuer/ ClusterIssuer @@ -238,7 +101,7 @@ writing new private key to './ca.key' - Now we are going to create a ca-secret using the certificate files that we have just generated. ```bash -$ kubectl create secret tls mongo-ca \ +$ kubectl create secret tls rabbitmq-ca \ --cert=ca.crt \ --key=ca.key \ --namespace=demo @@ -261,8 +124,8 @@ spec: Let's apply the `YAML` file: ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/issuer.yaml -issuer.cert-manager.io/mg-issuer created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/issuer.yaml +issuer.cert-manager.io/rm-issuer created ``` ### Create RabbitMQOpsRequest @@ -273,27 +136,24 @@ In order to add TLS to the database, we have to create a `RabbitMQOpsRequest` CR apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest metadata: - name: mops-add-tls + name: rmops-add-tls namespace: demo spec: type: ReconfigureTLS databaseRef: - name: mg-rs + name: rm tls: issuerRef: - name: mg-issuer + name: rm-issuer kind: Issuer apiGroup: "cert-manager.io" certificates: - alias: client subject: organizations: - - mongo + - rabbitmq organizationalUnits: - client - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 timeout: 5m apply: IfReady ``` @@ -303,13 +163,13 @@ Here, - `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `mg-rs` database. - `spec.type` specifies that we are performing `ReconfigureTLS` on our database. - `spec.tls.issuerRef` specifies the issuer name, kind and api group. -- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#spectls). +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/rabbitmq/concepts/rabbitmq.md#spectls). 
Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/mops-add-tls.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-add-tls created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/rmops-add-tls.yaml +rabbitmqopsrequest.ops.kubedb.com/rmops-add-tls created ``` #### Verify TLS Enabled Successfully @@ -326,8 +186,8 @@ mops-add-tls ReconfigureTLS Successful 91s We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed. ```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-add-tls -Name: mops-add-tls +$ kubectl describe rabbitmqopsrequest -n demo rmops-add-tls +Name: rmops-add-tls Namespace: demo Labels: Annotations: @@ -567,7 +427,7 @@ Metadata: Operation: Update Time: 2021-03-11T16:17:55Z Resource Version: 521643 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-rotate + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/rabbitmqopsrequests/mops-rotate UID: 6d96ead2-a868-47d8-85fb-77eecc9a96b4 Spec: Database Ref: @@ -621,7 +481,7 @@ Now, let's check the expiration date of the certificate. ```bash $ kubectl exec -it mg-rs-2 -n demo bash -root@mg-rs-2:/# openssl x509 -in /var/run/RabbitMQ/tls/client.pem -inform PEM -enddate -nameopt RFC2253 -noout +root@mg-rs-2:/# openssl x509 -in /var/run/rabbitmq/tls/client.pem -inform PEM -enddate -nameopt RFC2253 -noout notAfter=Jun 9 16:17:55 2021 GMT ``` @@ -658,18 +518,18 @@ Now, Let's create a new `Issuer` using the `mongo-new-ca` secret that we have ju apiVersion: cert-manager.io/v1 kind: Issuer metadata: - name: mg-new-issuer + name: rm-new-issuer namespace: demo spec: ca: - secretName: mongo-new-ca + secretName: rm-new-ca ``` Let's apply the `YAML` file: ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/new-issuer.yaml -issuer.cert-manager.io/mg-new-issuer created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/reconfigure-tls/new-issuer.yaml +issuer.cert-manager.io/rm-new-issuer created ``` ### Create RabbitMQOpsRequest @@ -685,10 +545,10 @@ metadata: spec: type: ReconfigureTLS databaseRef: - name: mg-rs + name: rm tls: issuerRef: - name: mg-new-issuer + name: rm-new-issuer kind: Issuer apiGroup: "cert-manager.io" ``` @@ -702,8 +562,8 @@ Here, Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/mops-change-issuer.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-change-issuer created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/reconfigure-tls/rmops-change-issuer.yaml +rabbitmqopsrequest.ops.kubedb.com/mops-change-issuer created ``` #### Verify Issuer is changed successfully @@ -766,7 +626,7 @@ Metadata: Operation: Update Time: 2021-03-11T16:27:47Z Resource Version: 523903 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-change-issuer + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/rabbitmqopsrequests/mops-change-issuer UID: cdfe8a7d-52ef-466c-a5dd-97e74ad598ca Spec: Database Ref: @@ -817,7 +677,7 @@ Now, Let's exec into 
a database node and find out the ca subject to see if it ma ```bash $ kubectl exec -it mg-rs-2 -n demo bash -root@mgo-rs-tls-2:/$ openssl x509 -in /var/run/RabbitMQ/tls/ca.crt -inform PEM -subject -nameopt RFC2253 -noout +root@mgo-rs-tls-2:/$ openssl x509 -in /var/run/rabbitmq/tls/ca.crt -inform PEM -subject -nameopt RFC2253 -noout subject=O=kubedb-updated,CN=ca-updated ``` @@ -854,8 +714,8 @@ Here, Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure-tls/mops-remove.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-remove created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/reconfigure-tls/mops-remove.yaml +rabbitmqopsrequest.ops.kubedb.com/mops-remove created ``` #### Verify TLS Removed Successfully @@ -988,19 +848,16 @@ So, we can see from the above that, output that tls is disabled successfully. To cleanup the Kubernetes resources created by this tutorial, run: ```bash -kubectl delete RabbitMQ -n demo mg-rs -kubectl delete issuer -n demo mg-issuer mg-new-issuer -kubectl delete RabbitMQopsrequest mops-add-tls mops-remove mops-rotate mops-change-issuer +kubectl delete rabbitmq -n demo rm +kubectl delete issuer -n demo rm-issuer rm-new-issuer +kubectl delete rabbitmqopsrequest rmops-add-tls rmops-remove rmops-rotate rmops-change-issuer kubectl delete ns demo ``` ## Next Steps -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
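One more verification trick that applies to all of the TLS operations above (add, rotate, change issuer): inspect the certificate the broker actually serves, not just the files mounted into the pod. A rough sketch, assuming the TLS-enabled listener is reachable on the standard AMQPS port `5671` of the database service (the exact service name and exposed ports depend on your setup):

```bash
# Forward the AMQPS port locally, then print a summary of the certificate served by the broker.
kubectl port-forward -n demo svc/rm 5671:5671 &
PF_PID=$!
sleep 2
openssl s_client -connect localhost:5671 -showcerts </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer -enddate
kill "$PF_PID"
```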
diff --git a/docs/guides/rabbitmq/reconfigure/_index.md b/docs/guides/rabbitmq/reconfigure/_index.md index f37e874950..9bb4cf7696 100644 --- a/docs/guides/rabbitmq/reconfigure/_index.md +++ b/docs/guides/rabbitmq/reconfigure/_index.md @@ -2,9 +2,9 @@ title: Reconfigure menu: docs_{{ .version }}: - identifier: mg-reconfigure + identifier: rm-reconfigure name: Reconfigure - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 46 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/rabbitmq/reconfigure/overview.md b/docs/guides/rabbitmq/reconfigure/overview.md index 5d1062a4b7..1229108347 100644 --- a/docs/guides/rabbitmq/reconfigure/overview.md +++ b/docs/guides/rabbitmq/reconfigure/overview.md @@ -2,9 +2,9 @@ title: Reconfiguring RabbitMQ menu: docs_{{ .version }}: - identifier: mg-reconfigure-overview + identifier: rm-reconfigure-overview name: Overview - parent: mg-reconfigure + parent: rm-reconfigure weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -14,20 +14,20 @@ section_menu_id: guides # Reconfiguring RabbitMQ -This guide will give an overview on how KubeDB Ops-manager operator reconfigures `RabbitMQ` database components such as ReplicaSet, Shard, ConfigServer, Mongos, etc. +This guide will give an overview of how the KubeDB Ops-manager operator reconfigures a `RabbitMQ` cluster. ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) -## How Reconfiguring RabbitMQ Process Works +## How the Reconfiguring RabbitMQ Process Works The following diagram shows how KubeDB Ops-manager operator reconfigures `RabbitMQ` database components. Open the image in a new tab to see the enlarged version.
-  Reconfiguring process of RabbitMQ + Reconfiguring process of RabbitMQ
Fig: Reconfiguring process of RabbitMQ
@@ -39,7 +39,7 @@ The Reconfiguring RabbitMQ process consists of the following steps: 3. When the operator finds a `RabbitMQ` CR, it creates required number of `StatefulSets` and related necessary stuff like secrets, services, etc. -4. Then, in order to reconfigure the various components (ie. ReplicaSet, Shard, ConfigServer, Mongos, etc.) of the `RabbitMQ` database the user creates a `RabbitMQOpsRequest` CR with desired information. +4. Then, in order to reconfigure the `RabbitMQ` database, the user creates a `RabbitMQOpsRequest` CR with the desired information. 5. `KubeDB` Ops-manager operator watches the `RabbitMQOpsRequest` CR. diff --git a/docs/guides/rabbitmq/reconfigure/reconfigure.md b/docs/guides/rabbitmq/reconfigure/reconfigure.md new file mode 100644 index 0000000000..d1c1900fb6 --- /dev/null +++ b/docs/guides/rabbitmq/reconfigure/reconfigure.md @@ -0,0 +1,329 @@ +--- +title: Reconfigure RabbitMQ Cluster +menu: + docs_{{ .version }}: + identifier: rm-reconfigure-cluster + name: Cluster + parent: rm-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure RabbitMQ Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a RabbitMQ cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/rabbitmq/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [examples](/docs/examples/rabbitmq) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `RabbitMQ` cluster using a version supported by the `KubeDB` operator. Then we are going to apply a `RabbitMQOpsRequest` to reconfigure its configuration. + +### Prepare RabbitMQ Cluster + +Now, we are going to deploy a `RabbitMQ` cluster with version `3.13.2`. + +### Deploy RabbitMQ Cluster + +At first, we will create a `rabbitmq.conf` file containing the required configuration settings. + +```ini +$ cat rabbitmq.conf +default_vhost = /customvhost +``` +Here, `default_vhost` is set to `/customvhost` instead of the default vhost `/`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo rabbit-custom-config --from-file=./rabbitmq.conf +secret/rabbit-custom-config created +``` + +In this section, we are going to create a RabbitMQ object specifying the `spec.configSecret` field to apply this custom configuration.
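Before wiring this secret into the `RabbitMQ` object, it can be worth sanity-checking what was actually stored. A small sketch (it assumes the secret key is `rabbitmq.conf`, i.e. the file name used with `--from-file` above):

```bash
# Decode the stored configuration to confirm the custom vhost really made it into the secret.
kubectl get secret -n demo rabbit-custom-config \
  -o jsonpath='{.data.rabbitmq\.conf}' | base64 -d
```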
Below is the YAML of the `RabbitMQ` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rm-cluster + namespace: demo +spec: + version: "3.13.2" + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configSecret: + name: rabbit-custom-config +``` + +Let's create the `RabbitMQ` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml +rabbitmq.kubedb.com/rm-cluster created +``` + +Now, wait until `rm-cluster` has status `Ready`, i.e., + +```bash +$ kubectl get rm -n demo +NAME TYPE VERSION STATUS AGE +rm-cluster kubedb.com/v1alpha2 3.13.2 Ready 79m +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First, we need to get the username and password to connect to a RabbitMQ instance, +```bash +$ kubectl get secrets -n demo rm-cluster-admin-cred -o jsonpath='{.data.\username}' | base64 -d +admin + +$ kubectl get secrets -n demo rm-cluster-admin-cred -o jsonpath='{.data.\password}' | base64 -d +m6lXjZugrC4VEpB8 +``` + +Now, let's check the configuration we have provided by using RabbitMQ's built-in CLI. + +```bash +$ kubectl exec -it -n demo rm-cluster-0 -- bash +Defaulted container "rabbitmq" out of: rabbitmq, rabbitmq-init (init) +rm-cluster-0:/$ rabbitmqctl list_vhosts +Listing vhosts ... +name +/customvhost +``` + +The provided custom vhost `/customvhost` is present and is set as the default vhost. + +### Reconfigure using new secret + +Now we will update this default vhost to `/newvhost` using a Reconfigure ops request. + +Let's edit the `rabbitmq.conf` file with the required configuration settings. + +```bash +$ echo "default_vhost = /newvhost" > rabbitmq.conf +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./rabbitmq.conf +secret/new-custom-config created +``` + +#### Create RabbitMQOpsRequest + +Now, we will use this secret to replace the previous secret using a `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: RabbitMQOpsRequest +metadata: + name: reconfigure-rm-cluster + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: rm-cluster + configuration: + configSecret: + name: new-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring the `rm-cluster` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configSecret.name` specifies the name of the new secret. +- Have a look [here](/docs/guides/rabbitmq/concepts/opsrequest.md#specconfiguration) on the respective sections to understand the `timeout` & `apply` fields. + +Let's create the `RabbitMQOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml +rabbitmqopsrequest.ops.kubedb.com/reconfigure-rm-cluster created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of the `RabbitMQ` object. + +Let's wait for `RabbitMQOpsRequest` to be `Successful`.
Run the following command to watch `RabbitMQOpsRequest` CR, + +```bash +$ watch kubectl get rabbitmqopsrequest -n demo +Every 2.0s: kubectl get rabbitmqopsrequest -n demo +NAME TYPE STATUS AGE +reconfigure-rm-cluster Reconfigure Successful 3m +``` + +We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe rabbitmqopsrequest -n demo reconfigure-rm-cluster +Name: reconfigure-rm-cluster +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: RabbitMQOpsRequest +Metadata: + Creation Timestamp: 2024-09-10T11:09:16Z + Generation: 1 + Resource Version: 70651 + UID: 5c99031f-6604-48ac-b700-96f896c5d0b3 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-custom-config + Database Ref: + Name: rm-cluster + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-09-10T11:09:16Z + Message: RabbitMQ ops-request has started to reconfigure RabbitMQ nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-09-10T11:09:24Z + Message: successfully reconciled the RabbitMQ with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-09-10T11:09:29Z + Message: get pod; ConditionStatus:True; PodName:rm-cluster-0 + Observed Generation: 1 + Status: True + Type: GetPod--rm-cluster-0 + Last Transition Time: 2024-09-10T11:09:29Z + Message: evict pod; ConditionStatus:True; PodName:rm-cluster-0 + Observed Generation: 1 + Status: True + Type: EvictPod--rm-cluster-0 + Last Transition Time: 2024-09-10T11:09:34Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-09-10T11:09:49Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-09-10T11:09:50Z + Message: Successfully completed reconfigure RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 6m13s KubeDB Ops-manager Operator Start processing for RabbitMQOpsRequest: demo/reconfigure-rm-cluster + Normal Starting 6m13s KubeDB Ops-manager Operator Pausing RabbitMQ databse: demo/rm-cluster + Normal Successful 6m13s KubeDB Ops-manager Operator Successfully paused RabbitMQ database: demo/rm-cluster for RabbitMQOpsRequest: reconfigure + Normal UpdatePetSets 6m5s KubeDB Ops-manager Operator successfully reconciled the RabbitMQ with new configure + Warning get pod; ConditionStatus:True; PodName:rm-cluster-0 6m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:rm-cluster-0 + Warning evict pod; ConditionStatus:True; PodName:rm-cluster-0 6m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:rm-cluster-0 + Warning running pod; ConditionStatus:False 5m55s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Normal RestartNodes 5m40s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 5m40s KubeDB Ops-manager Operator Resuming RabbitMQ database: demo/rm-cluster + Normal Successful 5m39s KubeDB Ops-manager Operator Successfully resumed RabbitMQ database: demo/rm-cluster for 
RabbitMQOpsRequest: reconfigure-rm-cluster +``` + +Now let's check the configuration we have provided after reconfiguration. + +```bash +$ kubectl exec -it -n demo rm-cluster-0 -- bash +Defaulted container "rabbitmq" out of: rabbitmq, rabbitmq-init (init) +rm-cluster-0:/$ rabbitmqctl list_vhosts +Listing vhosts ... +name +/newvhost +/customvhost +``` +As we can see from the configuration of running RabbitMQ, `/newvhost` is in the list of vhosts. + +### Reconfigure using apply config + +Let's say you are in a rush or, don't want to create a secret for updating configuration. You can directly do that using the following manifest. + +#### Create RabbitMQOpsRequest + +Now, we will use the new configuration in the `configuration.applyConfig` field in the `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: RabbitMQOpsRequest +metadata: + name: reconfigure-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: rm-cluster + configuration: + applyConfig: + rabbitmq.conf: | + default_vhost = /newvhost + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `rm-cluster` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `RabbitMQOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/opsrequests/rabbitmq-reconfigure-apply.yaml +rabbitmqopsrequest.ops.kubedb.com/reconfigure-apply created +``` + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete rm -n demo rm-cluster +kubectl delete rabbitmqopsrequest -n demo reconfigure-apply reconfigure-rm-cluster +``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/reconfigure/replicaset.md b/docs/guides/rabbitmq/reconfigure/replicaset.md deleted file mode 100644 index 00caf9a6e2..0000000000 --- a/docs/guides/rabbitmq/reconfigure/replicaset.md +++ /dev/null @@ -1,645 +0,0 @@ ---- -title: Reconfigure RabbitMQ Replicaset -menu: - docs_{{ .version }}: - identifier: mg-reconfigure-replicaset - name: Replicaset - parent: mg-reconfigure - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Reconfigure RabbitMQ Replicaset Database - -This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a RabbitMQ Replicaset. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [ReplicaSet](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Reconfigure Overview](/docs/guides/RabbitMQ/reconfigure/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete rm -n demo rm-cluster +kubectl delete rabbitmqopsrequest -n demo reconfigure-apply reconfigure-rm-cluster +``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/reconfigure/replicaset.md b/docs/guides/rabbitmq/reconfigure/replicaset.md deleted file mode 100644 index 00caf9a6e2..0000000000 --- a/docs/guides/rabbitmq/reconfigure/replicaset.md +++ /dev/null @@ -1,645 +0,0 @@ ---- -title: Reconfigure RabbitMQ Replicaset -menu: - docs_{{ .version }}: - identifier: mg-reconfigure-replicaset - name: Replicaset - parent: mg-reconfigure - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides --- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Reconfigure RabbitMQ Replicaset Database - -This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a RabbitMQ Replicaset. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [ReplicaSet](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Reconfigure Overview](/docs/guides/RabbitMQ/reconfigure/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
- -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -Now, we are going to deploy a `RabbitMQ` Replicaset using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQOpsRequest` to reconfigure its configuration. - -### Prepare RabbitMQ Replicaset - -Now, we are going to deploy a `RabbitMQ` Replicaset database with version `4.4.26`. - -### Deploy RabbitMQ - -At first, we will create `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 10000 -``` -Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. - -Now, we will create a secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf -secret/mg-custom-config created -``` - -In this section, we are going to create a RabbitMQ object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-replicaset - namespace: demo -spec: - version: "4.4.26" - replicas: 3 - replicaSet: - name: rs0 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - configSecret: - name: mg-custom-config -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mg-replicaset-config.yaml -RabbitMQ.kubedb.com/mg-replicaset created -``` - -Now, wait until `mg-replicaset` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 19m -``` - -Now, we will check if the database has started with the custom configuration we have provided. - -First we need to get the username and password to connect to a RabbitMQ instance, -```bash -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\password}' | base64 -d -nrKuxni0wDSMrgwy -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the configuration we have provided.
- -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614668500, 1), - "signature" : { - "hash" : BinData(0,"7sh886HhsNYajGxYGp5Jxi52IzA="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614668500, 1) -} -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been set to `10000`. - -### Reconfigure using new config secret - -Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. - -Now, we will edit the `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 20000 -``` - -Then, we will create a new secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf -secret/new-custom-config created -``` - -#### Create RabbitMQOpsRequest - -Now, we will use this secret to replace the previous secret using a `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-replicaset - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-replicaset - configuration: - replicaSet: - configSecret: - name: new-custom-config - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-replicaset` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.customConfig.replicaSet.configSecret.name` specifies the name of the new secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-replicaset created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `RabbitMQ` object. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. 
Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-replicaset Reconfigure Successful 113s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-replicaset -Name: mops-reconfigure-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T07:04:31Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:replicaSet: - .: - f:configSecret: - .: - f:name: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T07:04:31Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:replicaSet: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T07:04:31Z - Resource Version: 29869 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-reconfigure-replicaset - UID: 064733d6-19db-4153-82f7-bc0580116ee6 -Spec: - Apply: IfReady - Configuration: - Replica Set: - Config Secret: - Name: new-custom-config - Database Ref: - Name: mg-replicaset - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T07:04:31Z - Message: RabbitMQ ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T07:06:21Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureReplicaset - Status: True - Type: ReconfigureReplicaset - Last Transition Time: 2021-03-02T07:06:21Z - Message: Successfully completed the modification process. - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 2m55s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-replicaset - Normal PauseDatabase 2m55s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-replicaset - Normal ReconfigureReplicaset 65s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ResumeDatabase 65s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-replicaset - Normal ResumeDatabase 65s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-replicaset - Normal Successful 65s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the new configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614668887, 1), - "signature" : { - "hash" : BinData(0,"5q35Y51+YpbVHFKoaU7lUWi38oY="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614668887, 1) -} -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been changed from `10000` to `20000`. So the reconfiguration of the database is successful. - - -### Reconfigure using apply config - -Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `RabbitMQOpsRequest`. This will merge the new config in the existing secret. - -#### Create RabbitMQOpsRequest - -Now, we will use the new configuration in the `applyConfig` field in the `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-apply-replicaset - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-replicaset - configuration: - replicaSet: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-replicaset` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.replicaSet.applyConfig` specifies the new configuration that will be merged in the existing secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-apply-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-apply-replicaset created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. 
Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-apply-replicaset Reconfigure Successful 109s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-apply-replicaset -Name: mops-reconfigure-apply-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T07:09:39Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:replicaSet: - .: - f:applyConfig: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T07:09:39Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:replicaSet: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T07:09:39Z - Resource Version: 31005 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-reconfigure-apply-replicaset - UID: 0137442b-1b04-43ed-8de7-ecd913b44065 -Spec: - Apply: IfReady - Configuration: - Replica Set: - Apply Config: net: - maxIncomingConnections: 30000 - - Database Ref: - Name: mg-replicaset - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T07:09:39Z - Message: RabbitMQ ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T07:11:14Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureReplicaset - Status: True - Type: ReconfigureReplicaset - Last Transition Time: 2021-03-02T07:11:14Z - Message: Successfully completed the modification process. - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 9m20s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-replicaset - Normal PauseDatabase 9m20s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-replicaset - Normal ReconfigureReplicaset 7m45s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-replicaset - Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-replicaset - Normal Successful 7m45s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the new configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 30000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614669580, 1), - "signature" : { - "hash" : BinData(0,"u/xTAa4aW/8bsRvBYPffwQCeTF0="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614669580, 1) -} -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been changed from `20000` to `30000`. So the reconfiguration of the database using the `applyConfig` field is successful. - - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete RabbitMQopsrequest -n demo mops-reconfigure-replicaset mops-reconfigure-apply-replicaset -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/reconfigure/sharding.md b/docs/guides/rabbitmq/reconfigure/sharding.md deleted file mode 100644 index bf973afdec..0000000000 --- a/docs/guides/rabbitmq/reconfigure/sharding.md +++ /dev/null @@ -1,571 +0,0 @@ ---- -title: Reconfigure RabbitMQ Sharded Cluster -menu: - docs_{{ .version }}: - identifier: mg-reconfigure-shard - name: Sharding - parent: mg-reconfigure - weight: 40 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Reconfigure RabbitMQ Shard - -This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a RabbitMQ shard. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Sharding](/docs/guides/RabbitMQ/clustering/sharding.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Reconfigure Overview](/docs/guides/RabbitMQ/reconfigure/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -Now, we are going to deploy a `RabbitMQ` sharded database using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQOpsRequest` to reconfigure its configuration. 
- -### Prepare RabbitMQ Shard - -Now, we are going to deploy a `RabbitMQ` sharded database with version `4.4.26`. - -### Deploy RabbitMQ database - -At first, we will create `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 10000 -``` -Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. - -Now, we will create a secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf -secret/mg-custom-config created -``` - -In this section, we are going to create a RabbitMQ object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sharding - namespace: demo -spec: - version: 4.4.26 - shardTopology: - configServer: - replicas: 3 - configSecret: - name: mg-custom-config - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - mongos: - replicas: 2 - configSecret: - name: mg-custom-config - shard: - replicas: 3 - shards: 2 - configSecret: - name: mg-custom-config - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mg-shard-config.yaml -RabbitMQ.kubedb.com/mg-sharding created -``` - -Now, wait until `mg-sharding` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sharding 4.4.26 Ready 3m23s -``` - -Now, we will check if the database has started with the custom configuration we have provided. - -First we need to get the username and password to connect to a RabbitMQ instance, -```bash -$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\password}' | base64 -d -Dv8F55zVNiEkhHM6 -``` - -Now let's connect to a RabbitMQ instance from each type of nodes and run a RabbitMQ internal command to check the configuration we have provided. - -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} - -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} - -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been set to `10000` in all nodes. - -### Reconfigure using new secret - -Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. - -Now, we will edit the `mongod.conf` file containing required configuration settings. 
- -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 20000 -``` - -Then, we will create a new secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf -secret/new-custom-config created -``` - -#### Create RabbitMQOpsRequest - -Now, we will use this secret to replace the previous secret using a `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-shard - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-sharding - configuration: - shard: - configSecret: - name: new-custom-config - configServer: - configSecret: - name: new-custom-config - mongos: - configSecret: - name: new-custom-config - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-shard` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.shard.configSecret.name` specifies the name of the new secret for shard nodes. -- `spec.configuration.configServer.configSecret.name` specifies the name of the new secret for configServer nodes. -- `spec.configuration.mongos.configSecret.name` specifies the name of the new secret for mongos nodes. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -> **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-shard created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `RabbitMQ` object. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-shard Reconfigure Successful 3m8s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-shard - -``` - -Now let's connect to a RabbitMQ instance from each type of nodes and run a RabbitMQ internal command to check the new configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet - { - "bindIp" : "0.0.0.0", - "maxIncomingConnections" : 20000, - "port" : 27017, - "ssl" : { - "mode" : "disabled" - } - } - -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet - { - "bindIp" : "0.0.0.0", - "maxIncomingConnections" : 20000, - "port" : 27017, - "ssl" : { - "mode" : "disabled" - } - } - -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet - { - "bindIp" : "0.0.0.0", - "maxIncomingConnections" : 20000, - "port" : 27017, - "ssl" : { - "mode" : "disabled" - } - } -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been changed from `10000` to `20000` in all type of nodes. So the reconfiguration of the database is successful. - -### Reconfigure using apply config - -Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `RabbitMQOpsRequest`. This will merge the new config in the existing secret. - -#### Create RabbitMQOpsRequest - -Now, we will use the new configuration in the `data` field in the `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-apply-shard - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-sharding - configuration: - shard: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - configServer: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - mongos: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-shard` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.shard.applyConfig` specifies the new configuration that will be merged in the existing secret for shard nodes. -- `spec.configuration.configServer.applyConfig` specifies the new configuration that will be merged in the existing secret for configServer nodes. -- `spec.configuration.mongos.applyConfig` specifies the new configuration that will be merged in the existing secret for mongos nodes. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -> **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. 
- -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-apply-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-apply-shard created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-apply-shard Reconfigure Successful 3m24s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-apply-shard -Name: mops-reconfigure-apply-shard -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T13:08:25Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:configServer: - .: - f:configSecret: - .: - f:name: - f:mongos: - .: - f:configSecret: - .: - f:name: - f:shard: - .: - f:configSecret: - .: - f:name: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T13:08:25Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:configServer: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:mongos: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:shard: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T13:08:25Z - Resource Version: 103635 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-reconfigure-apply-shard - UID: ab454bcb-164c-4fa2-9eaa-dd47c60fe874 -Spec: - Apply: IfReady - Configuration: - Config Server: - Apply Config: net: - maxIncomingConnections: 30000 - - Mongos: - Apply Config: net: - maxIncomingConnections: 30000 - - Shard: - Apply Config: net: - maxIncomingConnections: 30000 - - Database Ref: - Name: mg-sharding - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T13:08:25Z - Message: RabbitMQ ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T13:10:10Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureConfigServer - Status: True - Type: ReconfigureConfigServer - Last Transition Time: 2021-03-02T13:13:15Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: 
ReconfigureShard - Status: True - Type: ReconfigureShard - Last Transition Time: 2021-03-02T13:14:10Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureMongos - Status: True - Type: ReconfigureMongos - Last Transition Time: 2021-03-02T13:14:10Z - Message: Successfully completed the modification process. - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 13m KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-sharding - Normal PauseDatabase 13m KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-sharding - Normal ReconfigureConfigServer 12m KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ReconfigureShard 9m7s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ReconfigureMongos 8m12s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ResumeDatabase 8m12s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-sharding - Normal ResumeDatabase 8m12s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-sharding - Normal Successful 8m12s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a RabbitMQ instance from each type of nodes and run a RabbitMQ internal command to check the new configuration we have provided. - -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} - -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} - -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet -{ - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } -} -``` - -As we can see from the configuration of ready RabbitMQ, the value of `maxIncomingConnections` has been changed from `20000` to `30000` in all nodes. So the reconfiguration of the database using the data field is successful. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-sharding -kubectl delete RabbitMQopsrequest -n demo mops-reconfigure-shard mops-reconfigure-apply-shard -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/reconfigure/standalone.md b/docs/guides/rabbitmq/reconfigure/standalone.md deleted file mode 100644 index 14c9291a9a..0000000000 --- a/docs/guides/rabbitmq/reconfigure/standalone.md +++ /dev/null @@ -1,590 +0,0 @@ ---- -title: Reconfigure Standalone RabbitMQ Database -menu: - docs_{{ .version }}: - identifier: mg-reconfigure-standalone - name: Standalone - parent: mg-reconfigure - weight: 20 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). 
- -# Reconfigure RabbitMQ Standalone Database - -This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a RabbitMQ standalone database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Reconfigure Overview](/docs/guides/RabbitMQ/reconfigure/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -Now, we are going to deploy a `RabbitMQ` standalone using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQOpsRequest` to reconfigure its configuration. - -### Prepare RabbitMQ Standalone Database - -Now, we are going to deploy a `RabbitMQ` standalone database with version `4.4.26`. - -### Deploy RabbitMQ standalone - -At first, we will create `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 10000 -``` -Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. - -Now, we will create a secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf -secret/mg-custom-config created -``` - -In this section, we are going to create a RabbitMQ object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-standalone - namespace: demo -spec: - version: "4.4.26" - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - configSecret: - name: mg-custom-config -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mg-standalone-config.yaml -RabbitMQ.kubedb.com/mg-standalone created -``` - -Now, wait until `mg-standalone` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-standalone 4.4.26 Ready 23s -``` - -Now, we will check if the database has started with the custom configuration we have provided. - -First we need to get the username and password to connect to a RabbitMQ instance, -```bash -$ kubectl get secrets -n demo mg-standalone-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-standalone-auth -o jsonpath='{.data.\password}' | base64 -d -m6lXjZugrC4VEpB8 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "security" : { - "authorization" : "enabled" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1 -} -``` - -As we can see from the configuration of running RabbitMQ, the value of `maxIncomingConnections` has been set to `10000`. - -### Reconfigure using new secret - -Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. - -Now, we will edit the `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 20000 -``` - -Then, we will create a new secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf -secret/new-custom-config created -``` - -#### Create RabbitMQOpsRequest - -Now, we will use this secret to replace the previous secret using a `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-standalone - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-standalone - configuration: - standalone: - configSecret: - name: new-custom-config - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-standalone` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.standalone.configSecret.name` specifies the name of the new secret. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-standalone.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-standalone created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `RabbitMQ` object. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-standalone Reconfigure Successful 10m -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-standalone -Name: mops-reconfigure-standalone -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T15:04:45Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:standalone: - .: - f:configSecret: - .: - f:name: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T15:04:45Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:standalone: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T15:04:45Z - Resource Version: 125826 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-reconfigure-standalone - UID: f63bb606-9df5-4516-9901-97dfe5b46b15 -Spec: - Apply: IfReady - Configuration: - Standalone: - Config Secret: - Name: new-custom-config - Database Ref: - Name: mg-standalone - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T15:04:45Z - Message: RabbitMQ ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T15:05:10Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureStandalone - Status: True - Type: ReconfigureStandalone - Last Transition Time: 2021-03-02T15:05:10Z - Message: Successfully completed the modification process. - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 60s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-standalone - Normal PauseDatabase 60s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-standalone - Normal ReconfigureStandalone 35s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ResumeDatabase 35s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-standalone - Normal ResumeDatabase 35s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-standalone - Normal Successful 35s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the new configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "security" : { - "authorization" : "enabled" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1 -} -``` - -As we can see from the configuration of running RabbitMQ, the value of `maxIncomingConnections` has been changed from `10000` to `20000`. So the reconfiguration of the database is successful. - - -### Reconfigure using apply config - -Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `RabbitMQOpsRequest`. This will merge the new config in the existing secret. - -#### Create RabbitMQOpsRequest - -Now, we will use the new configuration in the `data` field in the `RabbitMQOpsRequest` CR. The `RabbitMQOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-reconfigure-apply-standalone - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-standalone - configuration: - standalone: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-standalone` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.standalone.applyConfig` specifies the new configuration that will be merged in the existing secret. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reconfigure/mops-reconfigure-apply-standalone.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-reconfigure-apply-standalone created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-apply-standalone Reconfigure Successful 38s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-reconfigure-apply-standalone -Name: mops-reconfigure-apply-standalone -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T15:09:12Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:standalone: - .: - f:applyConfig: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T15:09:12Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:standalone: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T15:09:13Z - Resource Version: 126782 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-reconfigure-apply-standalone - UID: 33eea32f-e2af-4e36-b612-c528549e3d65 -Spec: - Apply: IfReady - Configuration: - Standalone: - Apply Config: net: - maxIncomingConnections: 30000 - - Database Ref: - Name: mg-standalone - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T15:09:13Z - Message: RabbitMQ ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T15:09:38Z - Message: Successfully Reconfigured RabbitMQ - Observed Generation: 1 - Reason: ReconfigureStandalone - Status: True - Type: ReconfigureStandalone - Last Transition Time: 2021-03-02T15:09:38Z - Message: Successfully completed the modification process. - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 118s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-standalone - Normal PauseDatabase 118s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-standalone - Normal ReconfigureStandalone 93s KubeDB Ops-manager operator Successfully Reconfigured RabbitMQ - Normal ResumeDatabase 93s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-standalone - Normal ResumeDatabase 93s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-standalone - Normal Successful 93s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the new configuration we have provided. 
- -```bash -$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 30000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "security" : { - "authorization" : "enabled" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1 -} -``` - -As we can see from the configuration of running RabbitMQ, the value of `maxIncomingConnections` has been changed from `20000` to `30000`. So the reconfiguration of the database using the `applyConfig` field is successful. - - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-standalone -kubectl delete RabbitMQopsrequest -n demo mops-reconfigure-standalone mops-reconfigure-apply-standalone -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/reprovision/_index.md b/docs/guides/rabbitmq/reprovision/_index.md deleted file mode 100644 index a04125c1b4..0000000000 --- a/docs/guides/rabbitmq/reprovision/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Reprovision RabbitMQ -menu: - docs_{{ .version }}: - identifier: mg-reprovision - name: Reprovision - parent: mg-RabbitMQ-guides - weight: 46 -menu_name: docs_{{ .version }} ---- diff --git a/docs/guides/rabbitmq/reprovision/reprovision.md b/docs/guides/rabbitmq/reprovision/reprovision.md deleted file mode 100644 index 6f8881bff8..0000000000 --- a/docs/guides/rabbitmq/reprovision/reprovision.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: Reprovision RabbitMQ -menu: - docs_{{ .version }}: - identifier: mg-reprovision-details - name: Reprovision RabbitMQ - parent: mg-reprovision - weight: 10 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Reprovision RabbitMQ - -KubeDB supports reprovisioning the RabbitMQ database via a RabbitMQOpsRequest. Reprovisioning is useful if you want, for some reason, to deploy a new RabbitMQ with the same specifications. This tutorial will show you how to use that. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). - -- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. - -```bash - $ kubectl create ns demo - namespace/demo created -``` - -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Deploy RabbitMQ - -In this section, we are going to deploy a RabbitMQ database using KubeDB. 
- -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mongo - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - podTemplate: - spec: - resources: - requests: - cpu: "300m" - memory: "300Mi" - replicas: 2 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - terminationPolicy: WipeOut - arbiter: {} - hidden: - replicas: 2 - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi -``` - -- `spec.replicaSet` represents the configuration for replicaset. - - `name` denotes the name of RabbitMQ replicaset. -- `spec.replicas` denotes the number of general members in `rs0` RabbitMQ replicaset. -- `spec.podTemplate` denotes specifications of all the 3 general replicaset members. -- `spec.ephemeralStorage` holds the emptyDir volume specifications. This storage spec will be passed to the StatefulSet created by KubeDB operator to run database pods. So, each members will have a pod of this ephemeral storage configuration. -- `spec.arbiter` denotes arbiter-node spec of the deployed RabbitMQ CRD. -- `spec.hidden` denotes hidden-node spec of the deployed RabbitMQ CRD. - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reprovision/mongo.yaml -RabbitMQ.kubedb.com/mongo created -``` - -## Apply Reprovision opsRequest - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: repro - namespace: demo -spec: - type: Reprovision - databaseRef: - name: mongo - apply: Always -``` - -- `spec.type` specifies the Type of the ops Request -- `spec.databaseRef` holds the name of the RabbitMQ database. The db should be available in the same namespace as the opsRequest -- `spec.apply` is set to Always to denote that, we want reprovisioning even if the db was not Ready. - -> Note: The method of reprovisioning the standalone & sharded db is exactly same as above. All you need, is to specify the corresponding RabbitMQ name in `spec.databaseRef.name` section. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/reprovision/ops.yaml -RabbitMQopsrequest.ops.kubedb.com/repro created -``` - -Now the Ops-manager operator will -1) Pause the DB -2) Delete all statefulsets -3) Remove `Provisioned` condition from db -4) Reconcile the db for start -5) Wait for DB to be Ready. 
- -```shell -$ kubectl get mgops -n demo -NAME TYPE STATUS AGE -repro Reprovision Successful 2m - - -$ kubectl get mgops -n demo -oyaml repro -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"RabbitMQOpsRequest","metadata":{"annotations":{},"name":"repro","namespace":"demo"},"spec":{"databaseRef":{"name":"mongo"},"type":"Reprovision"}} - creationTimestamp: "2022-10-31T09:50:35Z" - generation: 1 - name: repro - namespace: demo - resourceVersion: "743676" - uid: b3444d38-bef3-4043-925f-551fe6c86123 -spec: - apply: Always - databaseRef: - name: mongo - type: Reprovision -status: - conditions: - - lastTransitionTime: "2022-10-31T09:50:35Z" - message: RabbitMQ ops request is reprovisioning the database - observedGeneration: 1 - reason: Reprovision - status: "True" - type: Reprovision - - lastTransitionTime: "2022-10-31T09:50:45Z" - message: Successfully Deleted All the StatefulSets - observedGeneration: 1 - reason: DeleteStatefulSets - status: "True" - type: DeleteStatefulSets - - lastTransitionTime: "2022-10-31T09:52:05Z" - message: Database Phase is Ready - observedGeneration: 1 - reason: DatabaseReady - status: "True" - type: DatabaseReady - - lastTransitionTime: "2022-10-31T09:52:05Z" - message: Successfully Reprovisioned the database - observedGeneration: 1 - reason: Successful - status: "True" - type: Successful - observedGeneration: 1 - phase: Successful -``` - - -## Cleaning up - -To cleanup the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete RabbitMQopsrequest -n demo repro -kubectl delete RabbitMQ -n demo mongo -kubectl delete ns demo -``` - -## Next Steps - -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/rabbitmq/restart/_index.md b/docs/guides/rabbitmq/restart/_index.md index 3c0b6e841c..004efea524 100644 --- a/docs/guides/rabbitmq/restart/_index.md +++ b/docs/guides/rabbitmq/restart/_index.md @@ -2,9 +2,9 @@ title: Restart RabbitMQ menu: docs_{{ .version }}: - identifier: mg-restart + identifier: rm-restart name: Restart - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 46 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/restart/restart.md b/docs/guides/rabbitmq/restart/restart.md index f12126bb6a..58c38e0625 100644 --- a/docs/guides/rabbitmq/restart/restart.md +++ b/docs/guides/rabbitmq/restart/restart.md @@ -2,9 +2,9 @@ title: Restart RabbitMQ menu: docs_{{ .version }}: - identifier: mg-restart-details + identifier: rm-restart-details name: Restart RabbitMQ - parent: mg-restart + parent: rm-restart weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -29,7 +29,7 @@ KubeDB supports restarting the RabbitMQ database via a RabbitMQOpsRequest. Resta namespace/demo created ``` -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). +> Note: YAML files used in this tutorial are stored in [docs/examples/rabbitmq](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Deploy RabbitMQ @@ -39,19 +39,11 @@ In this section, we are going to deploy a RabbitMQ database using KubeDB. apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: mongo + name: rm namespace: demo spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - podTemplate: - spec: - resources: - requests: - cpu: "300m" - memory: "300Mi" - replicas: 2 + version: "3.13.2" + replicas: 3 storageType: Durable storage: storageClassName: "standard" @@ -60,32 +52,14 @@ spec: resources: requests: storage: 1Gi - terminationPolicy: WipeOut - arbiter: {} - hidden: - replicas: 2 - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi + deletionPolicy: WipeOut ``` -- `spec.replicaSet` represents the configuration for replicaset. - - `name` denotes the name of RabbitMQ replicaset. -- `spec.replicas` denotes the number of general members in `rs0` RabbitMQ replicaset. -- `spec.podTemplate` denotes specifications of all the 3 general replicaset members. -- `spec.ephemeralStorage` holds the emptyDir volume specifications. This storage spec will be passed to the StatefulSet created by KubeDB operator to run database pods. So, each members will have a pod of this ephemeral storage configuration. -- `spec.arbiter` denotes arbiter-node spec of the deployed RabbitMQ CRD. -- `spec.hidden` denotes hidden-node spec of the deployed RabbitMQ CRD. 
- Let's create the `RabbitMQ` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/restart/mongo.yaml -RabbitMQ.kubedb.com/mongo created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/restart/rm.yaml +rabbitmq.kubedb.com/rm created ``` ## Apply Restart opsRequest @@ -99,25 +73,22 @@ metadata: spec: type: Restart databaseRef: - name: mongo - readinessCriteria: - oplogMaxLagSeconds: 10 - objectsCountDiffPercentage: 15 + name: rm timeout: 3m apply: Always ``` - `spec.type` specifies the Type of the ops Request - `spec.databaseRef` holds the name of the RabbitMQ database. The db should be available in the same namespace as the opsRequest -- The meaning of`spec.readinessCriteria`, `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinessCriteria) +- The meaning of `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/rabbitmq/concepts/opsrequest.md#spectimeout) > Note: The method of restarting the standalone & sharded db is exactly same as above. All you need, is to specify the corresponding RabbitMQ name in `spec.databaseRef.name` section. Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/restart/ops.yaml -RabbitMQopsrequest.ops.kubedb.com/restart created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/restart/ops.yaml +rabbitmqopsrequest.ops.kubedb.com/restart created ``` Now the Ops-manager operator will first restart the general secondary pods, then serially the arbiters, the hidden nodes, & lastly will restart the Primary of the database. @@ -179,18 +150,15 @@ status: To cleanup the Kubernetes resources created by this tutorial, run: ```bash -kubectl delete RabbitMQopsrequest -n demo restart -kubectl delete RabbitMQ -n demo mongo +kubectl delete rabbitmqopsrequest -n demo restart +kubectl delete rabbitmq -n demo rm kubectl delete ns demo ``` ## Next Steps -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/rabbitmq/monitoring/using-prometheus-operator.md). +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). - Want to hack on KubeDB? 
Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/scaling/_index.md b/docs/guides/rabbitmq/scaling/_index.md index e5cd7b6f39..0687a2c5c5 100644 --- a/docs/guides/rabbitmq/scaling/_index.md +++ b/docs/guides/rabbitmq/scaling/_index.md @@ -2,9 +2,9 @@ title: Scaling RabbitMQ menu: docs_{{ .version }}: - identifier: mg-scaling + identifier: rm-scaling name: Scaling - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 43 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/horizontal-scaling/_index.md b/docs/guides/rabbitmq/scaling/horizontal-scaling/_index.md index ecf4c604a7..eb8f443486 100644 --- a/docs/guides/rabbitmq/scaling/horizontal-scaling/_index.md +++ b/docs/guides/rabbitmq/scaling/horizontal-scaling/_index.md @@ -2,9 +2,9 @@ title: Horizontal Scaling menu: docs_{{ .version }}: - identifier: mg-horizontal-scaling + identifier: rm-horizontal-scaling name: Horizontal Scaling - parent: mg-scaling + parent: rm-scaling weight: 10 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/horizontal-scaling/horizontal-scaling.md b/docs/guides/rabbitmq/scaling/horizontal-scaling/horizontal-scaling.md new file mode 100644 index 0000000000..b918dcf20f --- /dev/null +++ b/docs/guides/rabbitmq/scaling/horizontal-scaling/horizontal-scaling.md @@ -0,0 +1,438 @@ +--- +title: Horizontal Scaling RabbitMQ +menu: + docs_{{ .version }}: + identifier: rm-horizontal-scaling-ops + name: HorizontalScaling OpsRequest + parent: rm-horizontal-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Pgpool + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the replicaset of a Pgpool. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Pgpool](/docs/guides/pgpool/concepts/pgpool.md) + - [PgpoolOpsRequest](/docs/guides/pgpool/concepts/opsrequest.md) + - [Horizontal Scaling Overview](/docs/guides/pgpool/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/pgpool](/docs/examples/pgpool) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on pgpool + +Here, we are going to deploy a `Pgpool` using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Postgres +Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed postgres but in that case you need to create an [appbinding](/docs/guides/pgpool/concepts/appbinding.md) yourself. In this tutorial we will use 3 node Postgres cluster named `ha-postgres`. 
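If you do not already have such a cluster, a manifest along the lines of the sketch below can be applied first. This is only a sketch: the PostgreSQL version string is an assumption, so substitute any version supported by your KubeDB installation.

```bash
# A minimal 3-node KubeDB Postgres cluster named ha-postgres in the demo namespace.
# The version "16.1" is an assumption; use any version your KubeDB release supports.
cat <<EOF | kubectl apply -f -
apiVersion: kubedb.com/v1alpha2
kind: Postgres
metadata:
  name: ha-postgres
  namespace: demo
spec:
  version: "16.1"
  replicas: 3
  storageType: Durable
  storage:
    storageClassName: "standard"
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
  deletionPolicy: WipeOut
EOF
```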
+ +### Prepare Pgpool + +Now, we are going to deploy a `Pgpool` with version `4.5.0`. + +### Deploy Pgpool + +In this section, we are going to deploy a Pgpool. Then, in the next section we will scale the pgpool using `PgpoolOpsRequest` CRD. Below is the YAML of the `Pgpool` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Pgpool +metadata: + name: pp-horizontal + namespace: demo +spec: + version: "4.5.0" + replicas: 1 + postgresRef: + name: ha-postgres + namespace: demo + initConfig: + pgpoolConfig: + max_pool : 60 + deletionPolicy: WipeOut +``` +Here we are creating the pgpool with `max_pool=60`. This is necessary because we will scale up the pgpool replicas, and the additional replicas need a larger `max_pool`. Let's create the `Pgpool` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgpool/scaling/pp-horizontal.yaml +pgpool.kubedb.com/pp-horizontal created +``` + +Now, wait until `pp-horizontal` has status `Ready`, i.e., + +```bash +$ kubectl get pp -n demo +NAME TYPE VERSION STATUS AGE +pp-horizontal kubedb.com/v1alpha2 4.5.0 Ready 2m +``` + +Let's check the number of replicas this pgpool has from the Pgpool object, and the number of pods the petset has, + +```bash +$ kubectl get pgpool -n demo pp-horizontal -o json | jq '.spec.replicas' +1 + +$ kubectl get petset -n demo pp-horizontal -o json | jq '.spec.replicas' +1 +``` + +We can see from both commands that the pgpool has 1 replica. + +We are now ready to apply the `PgpoolOpsRequest` CR to scale this pgpool. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the pgpool to meet the desired number of replicas after scaling. + +#### Create PgpoolOpsRequest + +In order to scale up the replicas of the pgpool, we have to create a `PgpoolOpsRequest` CR with our desired replicas. Below is the YAML of the `PgpoolOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgpoolOpsRequest +metadata: + name: pgpool-horizontal-scale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pp-horizontal + horizontalScaling: + node: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `pp-horizontal` pgpool. +- `spec.type` specifies that we are performing `HorizontalScaling` on our pgpool. +- `spec.horizontalScaling.node` specifies the desired number of replicas after scaling. + +Let's create the `PgpoolOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgpool/scaling/horizontal-scaling/ppops-hscale-up-ops.yaml +pgpoolopsrequest.ops.kubedb.com/pgpool-horizontal-scale-up created +``` + +#### Verify replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of the `Pgpool` object and the related `PetSet`. + +Let's wait for `PgpoolOpsRequest` to be `Successful`. Run the following command to watch `PgpoolOpsRequest` CR, + +```bash +$ watch kubectl get pgpoolopsrequest -n demo +Every 2.0s: kubectl get pgpoolopsrequest -n demo +NAME TYPE STATUS AGE +pgpool-horizontal-scale-up HorizontalScaling Successful 2m49s +``` + +We can see from the above output that the `PgpoolOpsRequest` has succeeded. If we describe the `PgpoolOpsRequest` we will get an overview of the steps that were followed to scale the pgpool.
+ +```bash +$ kubectl describe pgpoolopsrequest -n demo pgpool-horizontal-scale-up +Name: pgpool-horizontal-scale-up +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgpoolOpsRequest +Metadata: + Creation Timestamp: 2024-07-17T08:35:13Z + Generation: 1 + Resource Version: 62002 + UID: ce44f7a1-e78d-4248-a691-62fe1efd11f3 +Spec: + Apply: IfReady + Database Ref: + Name: pp-horizontal + Horizontal Scaling: + Node: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-07-17T08:35:13Z + Message: Pgpool ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-07-17T08:35:16Z + Message: Successfully paused database + Observed Generation: 1 + Reason: DatabasePauseSucceeded + Status: True + Type: DatabasePauseSucceeded + Last Transition Time: 2024-07-17T08:35:41Z + Message: Successfully Scaled Up Node + Observed Generation: 1 + Reason: HorizontalScaleUp + Status: True + Type: HorizontalScaleUp + Last Transition Time: 2024-07-17T08:35:21Z + Message: patch petset; ConditionStatus:True; PodName:pp-horizontal-1 + Observed Generation: 1 + Status: True + Type: PatchPetset--pp-horizontal-1 + Last Transition Time: 2024-07-17T08:35:26Z + Message: is pod ready; ConditionStatus:True; PodName:pp-horizontal-1 + Observed Generation: 1 + Status: True + Type: IsPodReady--pp-horizontal-1 + Last Transition Time: 2024-07-17T08:35:26Z + Message: client failure; ConditionStatus:True; PodName:pp-horizontal-1 + Observed Generation: 1 + Status: True + Type: ClientFailure--pp-horizontal-1 + Last Transition Time: 2024-07-17T08:35:26Z + Message: is node healthy; ConditionStatus:True; PodName:pp-horizontal-1 + Observed Generation: 1 + Status: True + Type: IsNodeHealthy--pp-horizontal-1 + Last Transition Time: 2024-07-17T08:35:31Z + Message: patch petset; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: PatchPetset--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:35:31Z + Message: pp-horizontal already has desired replicas + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2024-07-17T08:35:36Z + Message: is pod ready; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: IsPodReady--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:35:36Z + Message: client failure; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: ClientFailure--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:35:36Z + Message: is node healthy; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: IsNodeHealthy--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:35:41Z + Message: Successfully updated Pgpool + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-07-17T08:35:41Z + Message: Successfully completed horizontally scale pgpool cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 4m5s KubeDB Ops-manager Operator Start processing for PgpoolOpsRequest: demo/pgpool-horizontal-scale-up + Normal Starting 4m5s KubeDB Ops-manager Operator Pausing Pgpool databse: demo/pp-horizontal + Normal Successful 
4m5s KubeDB Ops-manager Operator Successfully paused Pgpool database: demo/pp-horizontal for PgpoolOpsRequest: pgpool-horizontal-scale-up + Normal patch petset; ConditionStatus:True; PodName:pp-horizontal-1 3m57s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:pp-horizontal-1 + Normal is pod ready; ConditionStatus:True; PodName:pp-horizontal-1 3m52s KubeDB Ops-manager Operator is pod ready; ConditionStatus:True; PodName:pp-horizontal-1 + Normal is node healthy; ConditionStatus:True; PodName:pp-horizontal-1 3m52s KubeDB Ops-manager Operator is node healthy; ConditionStatus:True; PodName:pp-horizontal-1 + Normal patch petset; ConditionStatus:True; PodName:pp-horizontal-2 3m47s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:pp-horizontal-2 + Normal is pod ready; ConditionStatus:True; PodName:pp-horizontal-2 3m42s KubeDB Ops-manager Operator is pod ready; ConditionStatus:True; PodName:pp-horizontal-2 + Normal is node healthy; ConditionStatus:True; PodName:pp-horizontal-2 3m42s KubeDB Ops-manager Operator is node healthy; ConditionStatus:True; PodName:pp-horizontal-2 + Normal HorizontalScaleUp 3m37s KubeDB Ops-manager Operator Successfully Scaled Up Node + Normal UpdateDatabase 3m37s KubeDB Ops-manager Operator Successfully updated Pgpool + Normal Starting 3m37s KubeDB Ops-manager Operator Resuming Pgpool database: demo/pp-horizontal + Normal Successful 3m37s KubeDB Ops-manager Operator Successfully resumed Pgpool database: demo/pp-horizontal for PgpoolOpsRequest: pgpool-horizontal-scale-up +``` + +Now, we are going to verify the number of replicas this pgpool has from the Pgpool object, number of pods the petset have, + +```bash +$ kubectl get pp -n demo pp-horizontal -o json | jq '.spec.replicas' +3 + +$ kubectl get petset -n demo pp-horizontal -o json | jq '.spec.replicas' +3 +``` +From all the above outputs we can see that the replicas of the pgpool is `3`. That means we have successfully scaled up the replicas of the Pgpool. + + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the pgpool to meet the desired number of replicas after scaling. + +#### Create PgpoolOpsRequest + +In order to scale down the replicas of the pgpool, we have to create a `PgpoolOpsRequest` CR with our desired replicas. Below is the YAML of the `PgpoolOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgpoolOpsRequest +metadata: + name: pgpool-horizontal-scale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pp-horizontal + horizontalScaling: + node: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `pp-horizontal` pgpool. +- `spec.type` specifies that we are performing `HorizontalScaling` on our pgpool. +- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. + +Let's create the `PgpoolOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgpool/scaling/horizontal-scaling/ppops-hscale-down-ops.yaml +pgpoolopsrequest.ops.kubedb.com/pgpool-horizontal-scale-down created +``` + +#### Verify replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Pgpool` object and related `PetSet`. + +Let's wait for `PgpoolOpsRequest` to be `Successful`. 
Run the following command to watch `PgpoolOpsRequest` CR, + +```bash +$ watch kubectl get pgpoolopsrequest -n demo +Every 2.0s: kubectl get pgpoolopsrequest -n demo +NAME TYPE STATUS AGE +pgpool-horizontal-scale-down HorizontalScaling Successful 75s +``` + +We can see from the above output that the `PgpoolOpsRequest` has succeeded. If we describe the `PgpoolOpsRequest` we will get an overview of the steps that were followed to scale the pgpool. + +```bash +$ kubectl describe pgpoolopsrequest -n demo pgpool-horizontal-scale-down +Name: pgpool-horizontal-scale-down +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgpoolOpsRequest +Metadata: + Creation Timestamp: 2024-07-17T08:52:28Z + Generation: 1 + Resource Version: 63600 + UID: 019f9d8f-c2b0-4154-b3d3-b715b8805fd7 +Spec: + Apply: IfReady + Database Ref: + Name: pp-horizontal + Horizontal Scaling: + Node: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-07-17T08:52:28Z + Message: Pgpool ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-07-17T08:52:31Z + Message: Successfully paused database + Observed Generation: 1 + Reason: DatabasePauseSucceeded + Status: True + Type: DatabasePauseSucceeded + Last Transition Time: 2024-07-17T08:53:16Z + Message: Successfully Scaled Down Node + Observed Generation: 1 + Reason: HorizontalScaleDown + Status: True + Type: HorizontalScaleDown + Last Transition Time: 2024-07-17T08:52:36Z + Message: patch petset; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: PatchPetset--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:52:36Z + Message: pp-horizontal already has desired replicas + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2024-07-17T08:52:41Z + Message: get pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: GetPod + Last Transition Time: 2024-07-17T08:53:11Z + Message: get pod; ConditionStatus:True; PodName:pp-horizontal-2 + Observed Generation: 1 + Status: True + Type: GetPod--pp-horizontal-2 + Last Transition Time: 2024-07-17T08:53:16Z + Message: Successfully updated Pgpool + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-07-17T08:53:16Z + Message: Successfully completed horizontally scale pgpool cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 96s KubeDB Ops-manager Operator Start processing for PgpoolOpsRequest: demo/pgpool-horizontal-scale-down + Normal Starting 96s KubeDB Ops-manager Operator Pausing Pgpool databse: demo/pp-horizontal + Normal Successful 96s KubeDB Ops-manager Operator Successfully paused Pgpool database: demo/pp-horizontal for PgpoolOpsRequest: pgpool-horizontal-scale-down + Normal patch petset; ConditionStatus:True; PodName:pp-horizontal-2 88s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:pp-horizontal-2 + Normal get pod; ConditionStatus:False 83s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Normal get pod; ConditionStatus:True; PodName:pp-horizontal-2 53s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pp-horizontal-2 + Normal HorizontalScaleDown 
48s KubeDB Ops-manager Operator Successfully Scaled Down Node + Normal UpdateDatabase 48s KubeDB Ops-manager Operator Successfully updated Pgpool + Normal Starting 48s KubeDB Ops-manager Operator Resuming Pgpool database: demo/pp-horizontal + Normal Successful 48s KubeDB Ops-manager Operator Successfully resumed Pgpool database: demo/pp-horizontal for PgpoolOpsRequest: pgpool-horizontal-scale-down +``` + +Now, we are going to verify the number of replicas this pgpool has from the Pgpool object, and the number of pods the petset has, + +```bash +$ kubectl get pp -n demo pp-horizontal -o json | jq '.spec.replicas' +2 + +$ kubectl get petset -n demo pp-horizontal -o json | jq '.spec.replicas' +2 +``` +From all the above outputs we can see that the pgpool now has `2` replicas. That means we have successfully scaled down the replicas of the Pgpool. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete pp -n demo pp-horizontal +kubectl delete pgpoolopsrequest -n demo pgpool-horizontal-scale-up pgpool-horizontal-scale-down +``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/horizontal-scaling/overview.md b/docs/guides/rabbitmq/scaling/horizontal-scaling/overview.md index 80b5acd76e..6c24ba04ae 100644 --- a/docs/guides/rabbitmq/scaling/horizontal-scaling/overview.md +++ b/docs/guides/rabbitmq/scaling/horizontal-scaling/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ Horizontal Scaling Overview menu: docs_{{ .version }}: - identifier: mg-horizontal-scaling-overview + identifier: rm-horizontal-scaling-overview name: Overview - parent: mg-horizontal-scaling + parent: rm-horizontal-scaling weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -19,18 +19,13 @@ This guide will give an overview on how KubeDB Ops-manager operator scales up or ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Horizontal Scaling Process Works The following diagram shows how KubeDB Ops-manager operator scales up or down `RabbitMQ` database components. Open the image in a new tab to see the enlarged version. -
-  Fig: Horizontal scaling process of RabbitMQ
- The Horizontal scaling process consists of the following steps: 1. At first, a user creates a `RabbitMQ` Custom Resource (CR). diff --git a/docs/guides/rabbitmq/scaling/horizontal-scaling/replicaset.md b/docs/guides/rabbitmq/scaling/horizontal-scaling/replicaset.md deleted file mode 100644 index def80565f5..0000000000 --- a/docs/guides/rabbitmq/scaling/horizontal-scaling/replicaset.md +++ /dev/null @@ -1,692 +0,0 @@ ---- -title: Horizontal Scaling RabbitMQ Replicaset -menu: - docs_{{ .version }}: - identifier: mg-horizontal-scaling-replicaset - name: Replicaset - parent: mg-horizontal-scaling - weight: 20 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Horizontal Scale RabbitMQ Replicaset - -This guide will show you how to use `KubeDB` Ops-manager operator to scale the replicaset of a RabbitMQ database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Replicaset](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Horizontal Scaling Overview](/docs/guides/RabbitMQ/scaling/horizontal-scaling/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Apply Horizontal Scaling on Replicaset - -Here, we are going to deploy a `RabbitMQ` replicaset using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. - -### Prepare RabbitMQ Replicaset Database - -Now, we are going to deploy a `RabbitMQ` replicaset database with version `4.4.26`. - -### Deploy RabbitMQ replicaset - -In this section, we are going to deploy a RabbitMQ replicaset database. Then, in the next section we will scale the database using `RabbitMQOpsRequest` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-replicaset - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - replicas: 3 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/mg-replicaset.yaml -RabbitMQ.kubedb.com/mg-replicaset created -``` - -Now, wait until `mg-replicaset` has status `Ready`. 
i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 2m36s -``` - -Let's check the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-replicaset -o json | jq '.spec.replicas' -3 - -$ kubectl get sts -n demo mg-replicaset -o json | jq '.spec.replicas' -3 -``` - -We can see from both command that the database has 3 replicas in the replicaset. - -Also, we can verify the replicas of the replicaset from an internal RabbitMQ command by execing into a replica. - -First we need to get the username and password to connect to a RabbitMQ instance, -```bash -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\password}' | base64 -d -nrKuxni0wDSMrgwy -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, - -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 171, - "optime" : { - "ts" : Timestamp(1614698544, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:22:24Z"), - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614698393, 2), - "electionDate" : ISODate("2021-03-02T15:19:53Z"), - "configVersion" : 3, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-replicaset-1.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 128, - "optime" : { - "ts" : Timestamp(1614698544, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698544, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:22:24Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:22:24Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:22:32.411Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:22:31.543Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - }, - { - "_id" : 2, - "name" : "mg-replicaset-2.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 83, - "optime" : { - "ts" : Timestamp(1614698544, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698544, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:22:24Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:22:24Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:22:30.615Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:22:31.543Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - } -] -``` - -We can see from the above output that the replicaset has 3 nodes. 
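If you only need the member count rather than the full status document, the same check can be collapsed into a one-liner. This is a sketch that assumes the same pod name and root credentials shown above.

```bash
# Print just the number of replica set members instead of the full replSetGetStatus output.
# Uses the root credentials read from the mg-replicaset-auth secret above.
kubectl exec -n demo mg-replicaset-0 -- \
  mongo admin -u root -p nrKuxni0wDSMrgwy \
  --eval "rs.status().members.length" --quiet
```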
- -We are now ready to apply the `RabbitMQOpsRequest` CR to scale this database. - -## Scale Up Replicas - -Here, we are going to scale up the replicas of the replicaset to meet the desired number of replicas after scaling. - -#### Create RabbitMQOpsRequest - -In order to scale up the replicas of the replicaset of the database, we have to create a `RabbitMQOpsRequest` CR with our desired replicas. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-hscale-up-replicaset - namespace: demo -spec: - type: HorizontalScaling - databaseRef: - name: mg-replicaset - horizontalScaling: - replicas: 4 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `mops-hscale-up-replicaset` database. -- `spec.type` specifies that we are performing `HorizontalScaling` on our database. -- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/horizontal-scaling/mops-hscale-up-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-hscale-up-replicaset created -``` - -#### Verify Replicaset replicas scaled up successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `RabbitMQ` object and related `StatefulSets` and `Pods`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-hscale-up-replicaset HorizontalScaling Successful 106s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-hscale-up-replicaset -Name: mops-hscale-up-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T15:23:14Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:horizontalScaling: - .: - f:replicas: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T15:23:14Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T15:23:14Z - Resource Version: 129882 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-hscale-up-replicaset - UID: e97dac5c-5e3a-4153-9b31-8ba02af54bcb -Spec: - Database Ref: - Name: mg-replicaset - Horizontal Scaling: - Replicas: 4 - Type: HorizontalScaling -Status: - Conditions: - Last Transition Time: 2021-03-02T15:23:14Z - Message: RabbitMQ ops request is horizontally scaling database - Observed Generation: 1 - Reason: HorizontalScaling - Status: True - Type: HorizontalScaling - Last Transition Time: 2021-03-02T15:24:00Z - Message: Successfully Horizontally Scaled Up ReplicaSet - Observed Generation: 1 - Reason: ScaleUpReplicaSet - Status: True - Type: ScaleUpReplicaSet - Last Transition Time: 2021-03-02T15:24:00Z - Message: Successfully Horizontally Scaled RabbitMQ - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 91s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-replicaset - Normal PauseDatabase 91s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-replicaset - Normal ScaleUpReplicaSet 45s KubeDB Ops-manager operator Successfully Horizontally Scaled Up ReplicaSet - Normal ResumeDatabase 45s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-replicaset - Normal ResumeDatabase 45s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-replicaset - Normal Successful 45s KubeDB Ops-manager operator Successfully Horizontally Scaled Database -``` - -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-replicaset -o json | jq '.spec.replicas' -4 - -$ kubectl get sts -n demo mg-replicaset -o json | jq '.spec.replicas' -4 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 344, - "optime" : { - "ts" : Timestamp(1614698724, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:25:24Z"), - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614698393, 2), - "electionDate" : ISODate("2021-03-02T15:19:53Z"), - 
"configVersion" : 4, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-replicaset-1.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 301, - "optime" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:25:12Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:25:12Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:25:23.889Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:25:25.179Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 4 - }, - { - "_id" : 2, - "name" : "mg-replicaset-2.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 256, - "optime" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:25:12Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:25:12Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:25:23.888Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:25:25.136Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 4 - }, - { - "_id" : 3, - "name" : "mg-replicaset-3.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 93, - "optime" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698712, 2), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:25:12Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:25:12Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:25:23.926Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:25:24.089Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 4 - } -] -``` - -From all the above outputs we can see that the replicas of the replicaset is `4`. That means we have successfully scaled up the replicas of the RabbitMQ replicaset. - - -### Scale Down Replicas - -Here, we are going to scale down the replicas of the replicaset to meet the desired number of replicas after scaling. - -#### Create RabbitMQOpsRequest - -In order to scale down the replicas of the replicaset of the database, we have to create a `RabbitMQOpsRequest` CR with our desired replicas. 
Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-hscale-down-replicaset - namespace: demo -spec: - type: HorizontalScaling - databaseRef: - name: mg-replicaset - horizontalScaling: - replicas: 3 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `mops-hscale-down-replicaset` database. -- `spec.type` specifies that we are performing `HorizontalScaling` on our database. -- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/horizontal-scaling/mops-hscale-down-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-hscale-down-replicaset created -``` - -#### Verify Replicaset replicas scaled down successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `RabbitMQ` object and related `StatefulSets` and `Pods`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-hscale-down-replicaset HorizontalScaling Successful 2m32s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-hscale-down-replicaset -Name: mops-hscale-down-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T15:25:57Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:horizontalScaling: - .: - f:replicas: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T15:25:57Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T15:25:57Z - Resource Version: 130393 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-hscale-down-replicaset - UID: fbfee7f8-1dd5-4f58-aad7-ad2e2d66b295 -Spec: - Database Ref: - Name: mg-replicaset - Horizontal Scaling: - Replicas: 3 - Type: HorizontalScaling -Status: - Conditions: - Last Transition Time: 2021-03-02T15:25:57Z - Message: RabbitMQ ops request is horizontally scaling database - Observed Generation: 1 - Reason: HorizontalScaling - Status: True - Type: HorizontalScaling - Last Transition Time: 2021-03-02T15:26:17Z - Message: Successfully Horizontally Scaled Down ReplicaSet - Observed Generation: 1 - Reason: ScaleDownReplicaSet - Status: True - Type: ScaleDownReplicaSet - Last Transition Time: 2021-03-02T15:26:17Z - Message: Successfully Horizontally Scaled RabbitMQ - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- 
------ ---- ---- ------- - Normal PauseDatabase 50s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-replicaset - Normal PauseDatabase 50s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-replicaset - Normal ScaleDownReplicaSet 30s KubeDB Ops-manager operator Successfully Horizontally Scaled Down ReplicaSet - Normal ResumeDatabase 30s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-replicaset - Normal ResumeDatabase 30s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-replicaset - Normal Successful 30s KubeDB Ops-manager operator Successfully Horizontally Scaled Database -``` - -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-replicaset -o json | jq '.spec.replicas' -3 - -$ kubectl get sts -n demo mg-replicaset -o json | jq '.spec.replicas' -3 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 410, - "optime" : { - "ts" : Timestamp(1614698784, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:26:24Z"), - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614698393, 2), - "electionDate" : ISODate("2021-03-02T15:19:53Z"), - "configVersion" : 5, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-replicaset-1.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 367, - "optime" : { - "ts" : Timestamp(1614698784, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698784, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:26:24Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:26:24Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:26:29.423Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:26:29.330Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 5 - }, - { - "_id" : 2, - "name" : "mg-replicaset-2.mg-replicaset-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 322, - "optime" : { - "ts" : Timestamp(1614698784, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614698784, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:26:24Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:26:24Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:26:31.022Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:26:31.224Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-replicaset-0.mg-replicaset-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 5 - } -] -``` - -From all the above outputs we can see that the replicas of 
the replicaset is `3`. That means we have successfully scaled down the replicas of the RabbitMQ replicaset. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete RabbitMQopsrequest -n demo mops-vscale-replicaset -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/horizontal-scaling/sharding.md b/docs/guides/rabbitmq/scaling/horizontal-scaling/sharding.md deleted file mode 100644 index 85daf6bc49..0000000000 --- a/docs/guides/rabbitmq/scaling/horizontal-scaling/sharding.md +++ /dev/null @@ -1,1436 +0,0 @@ ---- -title: Horizontal Scaling RabbitMQ Shard -menu: - docs_{{ .version }}: - identifier: mg-horizontal-scaling-shard - name: Sharding - parent: mg-horizontal-scaling - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Horizontal Scale RabbitMQ Shard - -This guide will show you how to use `KubeDB` Ops-manager operator to scale the shard of a RabbitMQ database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Sharding](/docs/guides/RabbitMQ/clustering/sharding.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Horizontal Scaling Overview](/docs/guides/RabbitMQ/scaling/horizontal-scaling/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Apply Horizontal Scaling on Sharded Database - -Here, we are going to deploy a `RabbitMQ` sharded database using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. - -### Prepare RabbitMQ Sharded Database - -Now, we are going to deploy a `RabbitMQ` sharded database with version `4.4.26`. - -### Deploy RabbitMQ Sharded Database - -In this section, we are going to deploy a RabbitMQ sharded database. Then, in the next sections we will scale shards of the database using `RabbitMQOpsRequest` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sharding - namespace: demo -spec: - version: 4.4.26 - shardTopology: - configServer: - replicas: 3 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - mongos: - replicas: 2 - shard: - replicas: 3 - shards: 2 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/mg-shard.yaml -RabbitMQ.kubedb.com/mg-sharding created -``` - -Now, wait until `mg-sharding` has status `Ready`. 
i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sharding 4.4.26 Ready 10m -``` - -##### Verify Number of Shard and Shard Replicas - -Let's check the number of shards this database from the RabbitMQ object and the number of statefulsets it has, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.shards' -2 - -$ kubectl get sts -n demo -NAME READY AGE -mg-sharding-configsvr 3/3 23m -mg-sharding-mongos 2/2 22m -mg-sharding-shard0 3/3 23m -mg-sharding-shard1 3/3 23m -``` - -So, We can see from the both output that the database has 2 shards. - -Now, Let's check the number of replicas each shard has from the RabbitMQ object and the number of pod the statefulsets have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.replicas' -3 - -$ kubectl get sts -n demo mg-sharding-shard0 -o json | jq '.spec.replicas' -3 -``` - -We can see from both output that the database has 3 replicas in each shards. - -Also, we can verify the number of shard from an internal RabbitMQ command by execing into a mongos node. - -First we need to get the username and password to connect to a mongos instance, -```bash -$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\password}' | base64 -d -xBC-EwMFivFCgUlK -``` - -Now let's connect to a mongos instance and run a RabbitMQ internal command to check the number of shards, - -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 2 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 0 - Migration Results for the last 24 hours: - No recent migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } -``` - -We can see from the above output that the number of shard is 2. - -Also, we can verify the number of replicas each shard has from an internal RabbitMQ command by execing into a shard node. 
- -Now let's connect to a shard instance and run a RabbitMQ internal command to check the number of replicas, - -```bash -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 338, - "optime" : { - "ts" : Timestamp(1614699416, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:36:56Z"), - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614699092, 1), - "electionDate" : ISODate("2021-03-02T15:31:32Z"), - "configVersion" : 3, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 291, - "optime" : { - "ts" : Timestamp(1614699413, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614699413, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:36:53Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:36:53Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:36:56.692Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:36:56.015Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - }, - { - "_id" : 2, - "name" : "mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 259, - "optime" : { - "ts" : Timestamp(1614699413, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614699413, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:36:53Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:36:53Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:36:56.732Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:36:57.773Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - } -] -``` - -We can see from the above output that the number of replica is 3. - -##### Verify Number of ConfigServer - -Let's check the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.configServer.replicas' -3 - -$ kubectl get sts -n demo mg-sharding-configsvr -o json | jq '.spec.replicas' -3 -``` - -We can see from both command that the database has `3` replicas in the configServer. 
- -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, - -```bash -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 423, - "optime" : { - "ts" : Timestamp(1614699492, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:38:12Z"), - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614699081, 2), - "electionDate" : ISODate("2021-03-02T15:31:21Z"), - "configVersion" : 3, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 385, - "optime" : { - "ts" : Timestamp(1614699492, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614699492, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:38:12Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:38:12Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:38:13.573Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:38:12.725Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - }, - { - "_id" : 2, - "name" : "mg-sharding-configsvr-2.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 340, - "optime" : { - "ts" : Timestamp(1614699490, 8), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1614699490, 8), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2021-03-02T15:38:10Z"), - "optimeDurableDate" : ISODate("2021-03-02T15:38:10Z"), - "lastHeartbeat" : ISODate("2021-03-02T15:38:11.665Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T15:38:11.827Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 0, - "infoMessage" : "", - "configVersion" : 3 - } -] -``` - -We can see from the above output that the configServer has 3 nodes. - -##### Verify Number of Mongos -Let's check the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.mongos.replicas' -2 - -$ kubectl get sts -n demo mg-sharding-mongos -o json | jq '.spec.replicas' -2 -``` - -We can see from both command that the database has `2` replicas in the mongos. 
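
If you prefer a single command over checking each component separately, the whole declared topology can be read from the RabbitMQ object in one go. This is only a convenience sketch (it assumes the `mg-sharding` object created above and that `jq` is installed); it is not part of the original walkthrough:

```bash
# Summarize shards, replicas per shard, configServer and mongos replicas in one call.
$ kubectl get RabbitMQ -n demo mg-sharding -o json \
    | jq '.spec.shardTopology | {shards: .shard.shards, shardReplicas: .shard.replicas, configServerReplicas: .configServer.replicas, mongosReplicas: .mongos.replicas}'
```
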
- -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, - -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 2 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 0 - Migration Results for the last 24 hours: - No recent migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } -``` - -We can see from the above output that the mongos has 2 active nodes. - -We are now ready to apply the `RabbitMQOpsRequest` CR to update scale up and down all the components of the database. - -### Scale Up - -Here, we are going to scale up all the components of the database to meet the desired number of replicas after scaling. - -#### Create RabbitMQOpsRequest - -In order to scale up, we have to create a `RabbitMQOpsRequest` CR with our configuration. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-hscale-up-shard - namespace: demo -spec: - type: HorizontalScaling - databaseRef: - name: mg-sharding - horizontalScaling: - shard: - shards: 3 - replicas: 4 - mongos: - replicas: 3 - configServer: - replicas: 4 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `mops-hscale-up-shard` database. -- `spec.type` specifies that we are performing `HorizontalScaling` on our database. -- `spec.horizontalScaling.shard.shards` specifies the desired number of shards after scaling. -- `spec.horizontalScaling.shard.replicas` specifies the desired number of replicas of each shard after scaling. -- `spec.horizontalScaling.mongos.replicas` specifies the desired replicas after scaling. -- `spec.horizontalScaling.configServer.replicas` specifies the desired replicas after scaling. - -> **Note:** If you don't want to scale all the components together, you can only specify the components (shard, configServer and mongos) that you want to scale. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/horizontal-scaling/mops-hscale-up-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-hscale-up-shard created -``` - -#### Verify scaling up is successful - -If everything goes well, `KubeDB` Ops-manager operator will update the shard and replicas of `RabbitMQ` object and related `StatefulSets` and `Pods`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. 
Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-hscale-up-shard HorizontalScaling Successful 9m57s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-hscale-up-shard -Name: mops-hscale-up-shard -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T16:23:16Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:horizontalScaling: - .: - f:configServer: - .: - f:replicas: - f:mongos: - .: - f:replicas: - f:shard: - .: - f:replicas: - f:shards: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T16:23:16Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T16:23:16Z - Resource Version: 147313 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-hscale-up-shard - UID: 982014fc-1655-44e7-946c-859626ae0247 -Spec: - Database Ref: - Name: mg-sharding - Horizontal Scaling: - Config Server: - Replicas: 4 - Mongos: - Replicas: 3 - Shard: - Replicas: 4 - Shards: 3 - Type: HorizontalScaling -Status: - Conditions: - Last Transition Time: 2021-03-02T16:23:16Z - Message: RabbitMQ ops request is horizontally scaling database - Observed Generation: 1 - Reason: HorizontalScaling - Status: True - Type: HorizontalScaling - Last Transition Time: 2021-03-02T16:25:31Z - Message: Successfully Horizontally Scaled Up Shard Replicas - Observed Generation: 1 - Reason: ScaleUpShardReplicas - Status: True - Type: ScaleUpShardReplicas - Last Transition Time: 2021-03-02T16:33:07Z - Message: Successfully Horizontally Scaled Up Shard - Observed Generation: 1 - Reason: ScaleUpShard - Status: True - Type: ScaleUpShard - Last Transition Time: 2021-03-02T16:34:35Z - Message: Successfully Horizontally Scaled Up ConfigServer - Observed Generation: 1 - Reason: ScaleUpConfigServer - Status: True - Type: ScaleUpConfigServer - Last Transition Time: 2021-03-02T16:36:30Z - Message: Successfully Horizontally Scaled Mongos - Observed Generation: 1 - Reason: ScaleMongos - Status: True - Type: ScaleMongos - Last Transition Time: 2021-03-02T16:36:30Z - Message: Successfully Horizontally Scaled RabbitMQ - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 13m KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-sharding - Normal PauseDatabase 13m KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-sharding - Normal ScaleUpShardReplicas 11m KubeDB Ops-manager operator Successfully Horizontally Scaled Up Shard Replicas - Normal ResumeDatabase 11m KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-sharding - Normal ResumeDatabase 11m KubeDB Ops-manager operator Successfully resumed RabbitMQ 
demo/mg-sharding - Normal ScaleUpShardReplicas 11m KubeDB Ops-manager operator Successfully Horizontally Scaled Up Shard Replicas - Normal ScaleUpShardReplicas 11m KubeDB Ops-manager operator Successfully Horizontally Scaled Up Shard Replicas - Normal Progressing 8m20s KubeDB Ops-manager operator Successfully updated StatefulSets Resources - Normal Progressing 4m5s KubeDB Ops-manager operator Successfully updated StatefulSets Resources - Normal ScaleUpShard 3m59s KubeDB Ops-manager operator Successfully Horizontally Scaled Up Shard - Normal PauseDatabase 3m59s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-sharding - Normal PauseDatabase 3m59s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-sharding - Normal ScaleUpConfigServer 2m31s KubeDB Ops-manager operator Successfully Horizontally Scaled Up ConfigServer - Normal ScaleMongos 36s KubeDB Ops-manager operator Successfully Horizontally Scaled Mongos - Normal ResumeDatabase 36s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-sharding - Normal ResumeDatabase 36s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-sharding - Normal Successful 36s KubeDB Ops-manager operator Successfully Horizontally Scaled Database -``` - -#### Verify Number of Shard and Shard Replicas - -Now, we are going to verify the number of shards this database has from the RabbitMQ object, number of statefulsets it has, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.shards' -3 - -$ kubectl get sts -n demo -NAME READY AGE -mg-sharding-configsvr 4/4 66m -mg-sharding-mongos 3/3 64m -mg-sharding-shard0 4/4 66m -mg-sharding-shard1 4/4 66m -mg-sharding-shard2 4/4 12m -``` - -Now let's connect to a mongos instance and run a RabbitMQ internal command to check the number of shards, -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-3.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-3.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard2", "host" : "shard2/mg-sharding-shard2-0.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-1.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-2.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-3.mg-sharding-shard2-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 3 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 2 - Last reported error: Couldn't get a connection within the time limit - Time of Reported error: Tue Mar 02 2021 16:17:53 GMT+0000 (UTC) - Migration Results for the last 24 hours: - No recent 
migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } - config.system.sessions - shard key: { "_id" : 1 } - unique: false - balancing: true - chunks: - shard0 1 - { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0 Timestamp(1, 0) -``` - -From all the above outputs we can see that the number of shards are `3`. - -Now, we are going to verify the number of replicas each shard has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.replicas' -4 - -$ kubectl get sts -n demo mg-sharding-shard0 -o json | jq '.spec.replicas' -4 -``` - -Now let's connect to a shard instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 1464, - "optime" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:39:03Z"), - "syncingTo" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 4, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 1433, - "optime" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:39:03Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:39:03Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:39:07.800Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:39:08.087Z"), - "pingMs" : NumberLong(6), - "lastHeartbeatMessage" : "", - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614701678, 2), - "electionDate" : ISODate("2021-03-02T16:14:38Z"), - "configVersion" : 4 - }, - { - "_id" : 2, - "name" : "mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 1433, - "optime" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:39:03Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:39:03Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:39:08.575Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:39:08.580Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 4 - }, - { - "_id" : 3, - "name" : "mg-sharding-shard0-3.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 905, - "optime" : { - "ts" : 
Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703143, 10), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:39:03Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:39:03Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:39:06.683Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:39:07.980Z"), - "pingMs" : NumberLong(10), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 4 - } -] -``` - -From all the above outputs we can see that the replicas of each shard has is `4`. - -#### Verify Number of ConfigServer Replicas -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.configServer.replicas' -4 - -$ kubectl get sts -n demo mg-sharding-configsvr -o json | jq '.spec.replicas' -4 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 1639, - "optime" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:38:58Z"), - "syncingTo" : "mg-sharding-configsvr-2.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-2.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 2, - "infoMessage" : "", - "configVersion" : 4, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 1623, - "optime" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:38:58Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:38:58Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:38:58.979Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:38:59.291Z"), - "pingMs" : NumberLong(3), - "lastHeartbeatMessage" : "", - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614701497, 2), - "electionDate" : ISODate("2021-03-02T16:11:37Z"), - "configVersion" : 4 - }, - { - "_id" : 2, - "name" : "mg-sharding-configsvr-2.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 1623, - "optime" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:38:58Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:38:58Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:38:58.885Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:39:00.188Z"), - "pingMs" : NumberLong(3), - 
"lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 4 - }, - { - "_id" : 3, - "name" : "mg-sharding-configsvr-3.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 296, - "optime" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703138, 2), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:38:58Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:38:58Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:38:58.977Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:39:00.276Z"), - "pingMs" : NumberLong(1), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 4 - } -] -``` - -From all the above outputs we can see that the replicas of the configServer is `3`. That means we have successfully scaled up the replicas of the RabbitMQ configServer replicas. - -#### Verify Number of Mongos Replicas -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.mongos.replicas' -3 - -$ kubectl get sts -n demo mg-sharding-mongos -o json | jq '.spec.replicas' -3 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-3.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-3.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard2", "host" : "shard2/mg-sharding-shard2-0.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-1.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-2.mg-sharding-shard2-pods.demo.svc.cluster.local:27017,mg-sharding-shard2-3.mg-sharding-shard2-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 3 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 2 - Last reported error: Couldn't get a connection within the time limit - Time of Reported error: Tue Mar 02 2021 16:17:53 GMT+0000 
(UTC) - Migration Results for the last 24 hours: - No recent migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } - config.system.sessions - shard key: { "_id" : 1 } - unique: false - balancing: true - chunks: - shard0 1 - { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0 Timestamp(1, 0) -``` - -From all the above outputs we can see that the replicas of the mongos is `3`. That means we have successfully scaled up the replicas of the RabbitMQ mongos replicas. - - -So, we have successfully scaled up all the components of the RabbitMQ database. - -### Scale Down - -Here, we are going to scale down both the shard and their replicas to meet the desired number of replicas after scaling. - -#### Create RabbitMQOpsRequest - -In order to scale down, we have to create a `RabbitMQOpsRequest` CR with our configuration. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-hscale-down-shard - namespace: demo -spec: - type: HorizontalScaling - databaseRef: - name: mg-sharding - horizontalScaling: - shard: - shards: 2 - replicas: 3 - mongos: - replicas: 2 - configServer: - replicas: 3 -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `mops-hscale-down-shard` database. -- `spec.type` specifies that we are performing `HorizontalScaling` on our database. -- `spec.horizontalScaling.shard.shards` specifies the desired number of shards after scaling. -- `spec.horizontalScaling.shard.replicas` specifies the desired number of replicas of each shard after scaling. -- `spec.horizontalScaling.configServer.replicas` specifies the desired replicas after scaling. -- `spec.horizontalScaling.mongos.replicas` specifies the desired replicas after scaling. - -> **Note:** If you don't want to scale all the components together, you can only specify the components (shard, configServer and mongos) that you want to scale. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/horizontal-scaling/mops-hscale-down-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-hscale-down-shard created -``` - -#### Verify scaling down is successful - -If everything goes well, `KubeDB` Ops-manager operator will update the shards and replicas `RabbitMQ` object and related `StatefulSets` and `Pods`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ watch kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-hscale-down-shard HorizontalScaling Successful 81s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale down the the database. 
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-hscale-down-shard -Name: mops-hscale-down-shard -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T16:41:11Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:databaseRef: - .: - f:name: - f:horizontalScaling: - .: - f:configServer: - .: - f:replicas: - f:mongos: - .: - f:replicas: - f:shard: - .: - f:replicas: - f:shards: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T16:41:11Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T16:41:11Z - Resource Version: 149077 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-hscale-down-shard - UID: 0f83c457-9498-4144-a397-226141851751 -Spec: - Database Ref: - Name: mg-sharding - Horizontal Scaling: - Config Server: - Replicas: 3 - Mongos: - Replicas: 2 - Shard: - Replicas: 3 - Shards: 2 - Type: HorizontalScaling -Status: - Conditions: - Last Transition Time: 2021-03-02T16:41:11Z - Message: RabbitMQ ops request is horizontally scaling database - Observed Generation: 1 - Reason: HorizontalScaling - Status: True - Type: HorizontalScaling - Last Transition Time: 2021-03-02T16:42:11Z - Message: Successfully Horizontally Scaled Down Shard Replicas - Observed Generation: 1 - Reason: ScaleDownShardReplicas - Status: True - Type: ScaleDownShardReplicas - Last Transition Time: 2021-03-02T16:42:12Z - Message: Successfully started RabbitMQ load balancer - Observed Generation: 1 - Reason: StartingBalancer - Status: True - Type: StartingBalancer - Last Transition Time: 2021-03-02T16:43:03Z - Message: Successfully Horizontally Scaled Down Shard - Observed Generation: 1 - Reason: ScaleDownShard - Status: True - Type: ScaleDownShard - Last Transition Time: 2021-03-02T16:43:24Z - Message: Successfully Horizontally Scaled Down ConfigServer - Observed Generation: 1 - Reason: ScaleDownConfigServer - Status: True - Type: ScaleDownConfigServer - Last Transition Time: 2021-03-02T16:43:34Z - Message: Successfully Horizontally Scaled Mongos - Observed Generation: 1 - Reason: ScaleMongos - Status: True - Type: ScaleMongos - Last Transition Time: 2021-03-02T16:43:34Z - Message: Successfully Horizontally Scaled RabbitMQ - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 6m29s KubeDB Ops-manager operator Pausing RabbitMQ demo/mg-sharding - Normal PauseDatabase 6m29s KubeDB Ops-manager operator Successfully paused RabbitMQ demo/mg-sharding - Normal ScaleDownShardReplicas 5m29s KubeDB Ops-manager operator Successfully Horizontally Scaled Down Shard Replicas - Normal StartingBalancer 5m29s KubeDB Ops-manager operator Starting Balancer - Normal StartingBalancer 5m28s KubeDB Ops-manager operator Successfully Started Balancer - Normal ScaleDownShard 4m37s KubeDB Ops-manager operator Successfully Horizontally Scaled Down Shard - Normal ScaleDownConfigServer 4m16s KubeDB Ops-manager operator Successfully Horizontally Scaled Down ConfigServer - Normal ScaleMongos 4m6s 
KubeDB Ops-manager operator Successfully Horizontally Scaled Mongos - Normal ResumeDatabase 4m6s KubeDB Ops-manager operator Resuming RabbitMQ demo/mg-sharding - Normal ResumeDatabase 4m6s KubeDB Ops-manager operator Successfully resumed RabbitMQ demo/mg-sharding - Normal Successful 4m6s KubeDB Ops-manager operator Successfully Horizontally Scaled Database -``` - -##### Verify Number of Shard and Shard Replicas - -Now, we are going to verify the number of shards this database has from the RabbitMQ object, number of statefulsets it has, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.shards' -2 - -$ kubectl get sts -n demo -NAME READY AGE -mg-sharding-configsvr 3/3 77m -mg-sharding-mongos 2/2 75m -mg-sharding-shard0 3/3 77m -mg-sharding-shard1 3/3 77m -``` - -Now let's connect to a mongos instance and run a RabbitMQ internal command to check the number of shards, -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 2 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 2 - Last reported error: Couldn't get a connection within the time limit - Time of Reported error: Tue Mar 02 2021 16:17:53 GMT+0000 (UTC) - Migration Results for the last 24 hours: - No recent migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } - config.system.sessions - shard key: { "_id" : 1 } - unique: false - balancing: true - chunks: - shard0 1 - { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0 Timestamp(1, 0) -``` - -From all the above outputs we can see that the number of shards are `2`. 
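
As an extra sanity check that is not part of the original guide, you can also confirm that the StatefulSet and pods of the removed shard are gone, assuming the third shard kept the `mg-sharding-shard2` naming shown earlier:

```bash
# After scaling shards from 3 back to 2, no shard2 resources should remain.
$ kubectl get sts,pods -n demo | grep mg-sharding-shard2 \
    || echo "mg-sharding-shard2 StatefulSet and pods have been removed"
```
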
- -Now, we are going to verify the number of replicas each shard has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.shard.replicas' -3 - -$ kubectl get sts -n demo mg-sharding-shard0 -o json | jq '.spec.replicas' -3 -``` - -Now let's connect to a shard instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 2096, - "optime" : { - "ts" : Timestamp(1614703771, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:49:31Z"), - "syncingTo" : "mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 2, - "infoMessage" : "", - "configVersion" : 5, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 2065, - "optime" : { - "ts" : Timestamp(1614703771, 1), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703771, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:49:31Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:49:31Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:49:39.092Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:49:40.074Z"), - "pingMs" : NumberLong(18), - "lastHeartbeatMessage" : "", - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614701678, 2), - "electionDate" : ISODate("2021-03-02T16:14:38Z"), - "configVersion" : 5 - }, - { - "_id" : 2, - "name" : "mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 2065, - "optime" : { - "ts" : Timestamp(1614703771, 1), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703771, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:49:31Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:49:31Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:49:38.712Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:49:39.885Z"), - "pingMs" : NumberLong(4), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 5 - } -] -``` - -From all the above outputs we can see that the replicas of each shard has is `3`. 
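
To check every remaining shard at once instead of querying one StatefulSet at a time, a small shell loop over the shard StatefulSets works as well; a sketch that relies only on the naming convention used above:

```bash
# Print the declared replica count of each shard StatefulSet.
for sts in $(kubectl get sts -n demo -o name | grep mg-sharding-shard); do
  kubectl get -n demo "$sts" -o jsonpath='{.metadata.name}{"\t"}{.spec.replicas}{"\n"}'
done
```
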
- -##### Verify Number of ConfigServer Replicas - -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.configServer.replicas' -3 - -$ kubectl get sts -n demo mg-sharding-configsvr -o json | jq '.spec.replicas' -3 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "db.adminCommand( { replSetGetStatus : 1 } ).members" --quiet -[ - { - "_id" : 0, - "name" : "mg-sharding-configsvr-0.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 2345, - "optime" : { - "ts" : Timestamp(1614703841, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:50:41Z"), - "syncingTo" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 5, - "self" : true, - "lastHeartbeatMessage" : "" - }, - { - "_id" : 1, - "name" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 2329, - "optime" : { - "ts" : Timestamp(1614703841, 1), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703841, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:50:41Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:50:41Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:50:45.874Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:50:44.194Z"), - "pingMs" : NumberLong(0), - "lastHeartbeatMessage" : "", - "syncingTo" : "", - "syncSourceHost" : "", - "syncSourceId" : -1, - "infoMessage" : "", - "electionTime" : Timestamp(1614701497, 2), - "electionDate" : ISODate("2021-03-02T16:11:37Z"), - "configVersion" : 5 - }, - { - "_id" : 2, - "name" : "mg-sharding-configsvr-2.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 2329, - "optime" : { - "ts" : Timestamp(1614703841, 1), - "t" : NumberLong(2) - }, - "optimeDurable" : { - "ts" : Timestamp(1614703841, 1), - "t" : NumberLong(2) - }, - "optimeDate" : ISODate("2021-03-02T16:50:41Z"), - "optimeDurableDate" : ISODate("2021-03-02T16:50:41Z"), - "lastHeartbeat" : ISODate("2021-03-02T16:50:45.778Z"), - "lastHeartbeatRecv" : ISODate("2021-03-02T16:50:46.091Z"), - "pingMs" : NumberLong(1), - "lastHeartbeatMessage" : "", - "syncingTo" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceHost" : "mg-sharding-configsvr-1.mg-sharding-configsvr-pods.demo.svc.cluster.local:27017", - "syncSourceId" : 1, - "infoMessage" : "", - "configVersion" : 5 - } -] -``` - -From all the above outputs we can see that the replicas of the configServer is `3`. That means we have successfully scaled down the replicas of the RabbitMQ configServer replicas. 
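
A quick way to double-check the same thing from the pod side (a convenience sketch, not part of the original steps) is to list the config server pods and make sure only `configsvr-0` through `configsvr-2` remain and are Ready:

```bash
# With 3 replicas, exactly three configsvr pods should be Running and Ready.
$ kubectl get pods -n demo | grep mg-sharding-configsvr
```
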
- -##### Verify Number of Mongos Replicas - -Now, we are going to verify the number of replicas this database has from the RabbitMQ object, number of pods the statefulset have, - -```bash -$ kubectl get RabbitMQ -n demo mg-sharding -o json | jq '.spec.shardTopology.mongos.replicas' -2 - -$ kubectl get sts -n demo mg-sharding-mongos -o json | jq '.spec.replicas' -2 -``` - -Now let's connect to a RabbitMQ instance and run a RabbitMQ internal command to check the number of replicas, -```bash -$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p xBC-EwMFivFCgUlK --eval "sh.status()" --quiet ---- Sharding Status --- - sharding version: { - "_id" : 1, - "minCompatibleVersion" : 5, - "currentVersion" : 6, - "clusterId" : ObjectId("603e5a4bec470e6b4197e10b") - } - shards: - { "_id" : "shard0", "host" : "shard0/mg-sharding-shard0-0.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-1.mg-sharding-shard0-pods.demo.svc.cluster.local:27017,mg-sharding-shard0-2.mg-sharding-shard0-pods.demo.svc.cluster.local:27017", "state" : 1 } - { "_id" : "shard1", "host" : "shard1/mg-sharding-shard1-0.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-1.mg-sharding-shard1-pods.demo.svc.cluster.local:27017,mg-sharding-shard1-2.mg-sharding-shard1-pods.demo.svc.cluster.local:27017", "state" : 1 } - active mongoses: - "4.4.26" : 2 - autosplit: - Currently enabled: yes - balancer: - Currently enabled: yes - Currently running: no - Failed balancer rounds in last 5 attempts: 2 - Last reported error: Couldn't get a connection within the time limit - Time of Reported error: Tue Mar 02 2021 16:17:53 GMT+0000 (UTC) - Migration Results for the last 24 hours: - No recent migrations - databases: - { "_id" : "config", "primary" : "config", "partitioned" : true } - config.system.sessions - shard key: { "_id" : 1 } - unique: false - balancing: true - chunks: - shard0 1 - { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0 Timestamp(1, 0) -``` - -From all the above outputs we can see that the replicas of the mongos is `2`. That means we have successfully scaled down the replicas of the RabbitMQ mongos replicas. - -So, we have successfully scaled down all the components of the RabbitMQ database. 
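
As noted earlier, you do not have to scale every component in a single request; any of `shard`, `configServer` or `mongos` can be specified on its own. For illustration only, a mongos-only request could look like the sketch below. The name `mops-hscale-mongos-only` is made up for this example and does not exist in the examples directory:

```bash
# Hypothetical example: scale only the mongos layer, leaving shard and configServer untouched.
$ kubectl apply -f - <<EOF
apiVersion: ops.kubedb.com/v1alpha1
kind: RabbitMQOpsRequest
metadata:
  name: mops-hscale-mongos-only
  namespace: demo
spec:
  type: HorizontalScaling
  databaseRef:
    name: mg-sharding
  horizontalScaling:
    mongos:
      replicas: 3
EOF
```
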
- -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-sharding -kubectl delete RabbitMQopsrequest -n demo mops-vscale-up-shard mops-vscale-down-shard -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/vertical-scaling/_index.md b/docs/guides/rabbitmq/scaling/vertical-scaling/_index.md index b14609e8a3..f3aa57f2d5 100644 --- a/docs/guides/rabbitmq/scaling/vertical-scaling/_index.md +++ b/docs/guides/rabbitmq/scaling/vertical-scaling/_index.md @@ -2,9 +2,9 @@ title: Vertical Scaling menu: docs_{{ .version }}: - identifier: mg-vertical-scaling + identifier: rm-vertical-scaling name: Vertical Scaling - parent: mg-scaling + parent: rm-scaling weight: 20 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/vertical-scaling/overview.md b/docs/guides/rabbitmq/scaling/vertical-scaling/overview.md index 4f137b7fe3..a126638539 100644 --- a/docs/guides/rabbitmq/scaling/vertical-scaling/overview.md +++ b/docs/guides/rabbitmq/scaling/vertical-scaling/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ Vertical Scaling Overview menu: docs_{{ .version }}: - identifier: mg-vertical-scaling-overview + identifier: rm-vertical-scaling-overview name: Overview - parent: mg-vertical-scaling + parent: rm-vertical-scaling weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -19,18 +19,13 @@ This guide will give an overview on how KubeDB Ops-manager operator updates the ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Vertical Scaling Process Works The following diagram shows how KubeDB Ops-manager operator updates the resources of the `RabbitMQ` database. Open the image in a new tab to see the enlarged version. -
  [Figure: Vertical scaling process of RabbitMQ]
- The vertical scaling process consists of the following steps: 1. At first, a user creates a `RabbitMQ` Custom Resource (CR). diff --git a/docs/guides/rabbitmq/scaling/vertical-scaling/replicaset.md b/docs/guides/rabbitmq/scaling/vertical-scaling/replicaset.md deleted file mode 100644 index ee49e0e59a..0000000000 --- a/docs/guides/rabbitmq/scaling/vertical-scaling/replicaset.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -title: Vertical Scaling RabbitMQ Replicaset -menu: - docs_{{ .version }}: - identifier: mg-vertical-scaling-replicaset - name: Replicaset - parent: mg-vertical-scaling - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Vertical Scale RabbitMQ Replicaset - -This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a RabbitMQ replicaset database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Replicaset](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Vertical Scaling Overview](/docs/guides/RabbitMQ/scaling/vertical-scaling/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Apply Vertical Scaling on Replicaset - -Here, we are going to deploy a `RabbitMQ` replicaset using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. - -### Prepare RabbitMQ Replicaset Database - -Now, we are going to deploy a `RabbitMQ` replicaset database with version `4.4.26`. - -### Deploy RabbitMQ replicaset - -In this section, we are going to deploy a RabbitMQ replicaset database. Then, in the next section we will update the resources of the database using `RabbitMQOpsRequest` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-replicaset - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - replicas: 3 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/mg-replicaset.yaml -RabbitMQ.kubedb.com/mg-replicaset created -``` - -Now, wait until `mg-replicaset` has status `Ready`. 
i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 3m46s -``` - -Let's check the Pod containers resources, - -```bash -$ kubectl get pod -n demo mg-replicaset-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "500m", - "memory": "1Gi" - }, - "requests": { - "cpu": "500m", - "memory": "1Gi" - } -} -``` - -You can see the Pod has the default resources which is assigned by Kubedb operator. - -We are now ready to apply the `RabbitMQOpsRequest` CR to update the resources of this database. - -### Vertical Scaling - -Here, we are going to update the resources of the replicaset database to meet the desired resources after scaling. - -#### Create RabbitMQOpsRequest - -In order to update the resources of the database, we have to create a `RabbitMQOpsRequest` CR with our desired resources. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-vscale-replicaset - namespace: demo -spec: - type: VerticalScaling - databaseRef: - name: mg-replicaset - verticalScaling: - replicaSet: - resources: - requests: - memory: "1.2Gi" - cpu: "0.6" - limits: - memory: "1.2Gi" - cpu: "0.6" - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mops-vscale-replicaset` database. -- `spec.type` specifies that we are performing `VerticalScaling` on our database. -- `spec.VerticalScaling.replicaSet` specifies the desired resources after scaling. -- `spec.VerticalScaling.arbiter` could also be specified in similar fashion to get the desired resources for arbiter pod. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/vertical-scaling/mops-vscale-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-vscale-replicaset created -``` - -#### Verify RabbitMQ Replicaset resources updated successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the resources of `RabbitMQ` object and related `StatefulSets` and `Pods`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-vscale-replicaset VerticalScaling Successful 3m56s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. 
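
If you only need the recorded phase and per-step conditions rather than the full object dump shown below, a `jsonpath` query is a lighter-weight alternative; a sketch, assuming the `mops-vscale-replicaset` request created above:

```bash
# Print just the phase and the per-step conditions of the ops request.
$ kubectl get RabbitMQopsrequest -n demo mops-vscale-replicaset \
    -o jsonpath='{.status.phase}{"\n"}{range .status.conditions[*]}{.type}{"\t"}{.status}{"\t"}{.reason}{"\n"}{end}'
```
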
- -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-vscale-replicaset -Name: mops-vscale-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2022-10-26T10:41:56Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:databaseRef: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - f:verticalScaling: - .: - f:replicaSet: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-10-26T10:41:56Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-ops-manager - Operation: Update - Subresource: status - Time: 2022-10-26T10:44:33Z - Resource Version: 611468 - UID: 474053a7-90a8-49fd-9b27-c9bf7b4660e7 -Spec: - Apply: IfReady - Database Ref: - Name: mg-replicaset - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: VerticalScaling - Vertical Scaling: - Replica Set: - Limits: - Cpu: 0.6 - Memory: 1.2Gi - Requests: - Cpu: 0.6 - Memory: 1.2Gi -Status: - Conditions: - Last Transition Time: 2022-10-26T10:43:21Z - Message: RabbitMQ ops request is vertically scaling database - Observed Generation: 1 - Reason: VerticalScaling - Status: True - Type: VerticalScaling - Last Transition Time: 2022-10-26T10:44:33Z - Message: Successfully Vertically Scaled Replicaset Resources - Observed Generation: 1 - Reason: UpdateReplicaSetResources - Status: True - Type: UpdateReplicaSetResources - Last Transition Time: 2022-10-26T10:44:33Z - Message: Successfully Vertically Scaled Database - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 82s KubeDB Ops-manager Operator Pausing RabbitMQ demo/mg-replicaset - Normal PauseDatabase 82s KubeDB Ops-manager Operator Successfully paused RabbitMQ demo/mg-replicaset - Normal Starting 82s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-replicaset - Normal UpdateReplicaSetResources 82s KubeDB Ops-manager Operator Successfully updated replicaset Resources - Normal Starting 82s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-replicaset - Normal UpdateReplicaSetResources 82s KubeDB Ops-manager Operator Successfully updated replicaset Resources - Normal UpdateReplicaSetResources 10s KubeDB Ops-manager Operator Successfully Vertically Scaled Replicaset Resources - Normal ResumeDatabase 10s KubeDB Ops-manager Operator Resuming RabbitMQ demo/mg-replicaset - Normal ResumeDatabase 10s KubeDB Ops-manager Operator Successfully resumed RabbitMQ demo/mg-replicaset - Normal Successful 10s KubeDB Ops-manager Operator Successfully Vertically Scaled Database - -``` - -Now, we are going to verify from one of the Pod yaml whether the resources of the replicaset database has updated to meet up the desired state, Let's check, - -```bash -$ kubectl get pod -n demo mg-replicaset-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "600m", - "memory": "1288490188800m" - }, - "requests": { - "cpu": 
"600m", - "memory": "1288490188800m" - } -} -``` - -The above output verifies that we have successfully scaled up the resources of the RabbitMQ replicaset database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete RabbitMQopsrequest -n demo mops-vscale-replicaset -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/vertical-scaling/sharding.md b/docs/guides/rabbitmq/scaling/vertical-scaling/sharding.md deleted file mode 100644 index c5c421d786..0000000000 --- a/docs/guides/rabbitmq/scaling/vertical-scaling/sharding.md +++ /dev/null @@ -1,438 +0,0 @@ ---- -title: Vertical Scaling Sharded RabbitMQ Cluster -menu: - docs_{{ .version }}: - identifier: mg-vertical-scaling-shard - name: Sharding - parent: mg-vertical-scaling - weight: 40 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Vertical Scale RabbitMQ Replicaset - -This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a RabbitMQ replicaset database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Replicaset](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Vertical Scaling Overview](/docs/guides/RabbitMQ/scaling/vertical-scaling/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -## Apply Vertical Scaling on Sharded Database - -Here, we are going to deploy a `RabbitMQ` sharded database using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. - -### Prepare RabbitMQ Sharded Database - -Now, we are going to deploy a `RabbitMQ` sharded database with version `4.4.26`. - -### Deploy RabbitMQ Sharded Database - -In this section, we are going to deploy a RabbitMQ sharded database. Then, in the next sections we will update the resources of various components (mongos, shard, configserver etc.) of the database using `RabbitMQOpsRequest` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sharding - namespace: demo -spec: - version: 4.4.26 - shardTopology: - configServer: - replicas: 3 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - mongos: - replicas: 2 - shard: - replicas: 3 - shards: 2 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/mg-shard.yaml -RabbitMQ.kubedb.com/mg-sharding created -``` - -Now, wait until `mg-sharding` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sharding 4.4.26 Ready 8m51s -``` - -Let's check the Pod containers resources of various components (mongos, shard, configserver etc.) of the database, - -```bash -$ kubectl get pod -n demo mg-sharding-mongos-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "500m", - "memory": "1Gi" - }, - "requests": { - "cpu": "500m", - "memory": "1Gi" - } -} - -$ kubectl get pod -n demo mg-sharding-configsvr-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "500m", - "memory": "1Gi" - }, - "requests": { - "cpu": "500m", - "memory": "1Gi" - } -} - -$ kubectl get pod -n demo mg-sharding-shard0-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "500m", - "memory": "1Gi" - }, - "requests": { - "cpu": "500m", - "memory": "1Gi" - } -} -``` - -You can see all the Pod of mongos, configserver and shard has default resources which is assigned by Kubedb operator. - -We are now ready to apply the `RabbitMQOpsRequest` CR to update the resources of mongos, configserver and shard nodes of this database. - -## Vertical Scaling of Shard - -Here, we are going to update the resources of the shard of the database to meet the desired resources after scaling. - -#### Create RabbitMQOpsRequest for shard - -In order to update the resources of the shard nodes, we have to create a `RabbitMQOpsRequest` CR with our desired resources. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-vscale-shard - namespace: demo -spec: - type: VerticalScaling - databaseRef: - name: mg-sharding - verticalScaling: - shard: - resources: - requests: - memory: "1100Mi" - cpu: "0.55" - limits: - memory: "1100Mi" - cpu: "0.55" - configServer: - resources: - requests: - memory: "1100Mi" - cpu: "0.55" - limits: - memory: "1100Mi" - cpu: "0.55" - mongos: - resources: - requests: - memory: "1100Mi" - cpu: "0.55" - limits: - memory: "1100Mi" - cpu: "0.55" - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mops-vscale-shard` database. -- `spec.type` specifies that we are performing `VerticalScaling` on our database. -- `spec.VerticalScaling.shard` specifies the desired resources after scaling for the shard nodes. -- `spec.VerticalScaling.configServer` specifies the desired resources after scaling for the configServer nodes. -- `spec.VerticalScaling.mongos` specifies the desired resources after scaling for the mongos nodes. 
-- `spec.VerticalScaling.arbiter` could also be specified in similar fashion to get the desired resources for arbiter pod. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -> **Note:** If you don't want to scale all the components together, you can only specify the components (shard, configServer and mongos) that you want to scale. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/vertical-scaling/mops-vscale-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-vscale-shard created -``` - -#### Verify RabbitMQ Shard resources updated successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the resources of `RabbitMQ` object and related `StatefulSets` and `Pods` of shard nodes. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-vscale-shard VerticalScaling Successful 8m21s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-vscale-shard -Name: mops-vscale-shard -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2022-10-26T10:45:56Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:databaseRef: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - f:verticalScaling: - .: - f:configServer: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - f:mongos: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - f:shard: - .: - f:limits: - .: - f:cpu: - f:memory: - f:requests: - .: - f:cpu: - f:memory: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-10-26T10:45:56Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-ops-manager - Operation: Update - Subresource: status - Time: 2022-10-26T10:52:28Z - Resource Version: 613274 - UID: a186cc72-3629-4034-bbf8-988839f6ec23 -Spec: - Apply: IfReady - Database Ref: - Name: mg-sharding - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: VerticalScaling - Vertical Scaling: - Config Server: - Limits: - Cpu: 0.55 - Memory: 1100Mi - Requests: - Cpu: 0.55 - Memory: 1100Mi - Mongos: - Limits: - Cpu: 0.55 - Memory: 1100Mi - Requests: - Cpu: 0.55 - Memory: 1100Mi - Shard: - Limits: - Cpu: 0.55 - Memory: 1100Mi - Requests: - Cpu: 0.55 - Memory: 1100Mi -Status: - Conditions: - Last Transition Time: 2022-10-26T10:48:06Z - Message: RabbitMQ ops request is vertically scaling database - Observed Generation: 1 - Reason: VerticalScaling - Status: True - Type: VerticalScaling - Last Transition Time: 
2022-10-26T10:49:37Z - Message: Successfully Vertically Scaled ConfigServer Resources - Observed Generation: 1 - Reason: UpdateConfigServerResources - Status: True - Type: UpdateConfigServerResources - Last Transition Time: 2022-10-26T10:50:07Z - Message: Successfully Vertically Scaled Mongos Resources - Observed Generation: 1 - Reason: UpdateMongosResources - Status: True - Type: UpdateMongosResources - Last Transition Time: 2022-10-26T10:52:28Z - Message: Successfully Vertically Scaled Shard Resources - Observed Generation: 1 - Reason: UpdateShardResources - Status: True - Type: UpdateShardResources - Last Transition Time: 2022-10-26T10:52:28Z - Message: Successfully Vertically Scaled Database - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 4m51s KubeDB Ops-manager Operator Successfully paused RabbitMQ demo/mg-sharding - Normal Starting 4m51s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sharding-configsvr - Normal UpdateConfigServerResources 4m51s KubeDB Ops-manager Operator Successfully updated configServer Resources - Normal Starting 4m51s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sharding-configsvr - Normal UpdateConfigServerResources 4m51s KubeDB Ops-manager Operator Successfully updated configServer Resources - Normal PauseDatabase 4m51s KubeDB Ops-manager Operator Pausing RabbitMQ demo/mg-sharding - Normal UpdateConfigServerResources 3m20s KubeDB Ops-manager Operator Successfully Vertically Scaled ConfigServer Resources - Normal Starting 3m20s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sharding-mongos - Normal UpdateMongosResources 3m20s KubeDB Ops-manager Operator Successfully updated Mongos Resources - Normal UpdateShardResources 2m50s KubeDB Ops-manager Operator Successfully updated Shard Resources - Normal Starting 2m50s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sharding-shard0 - Normal Starting 2m50s KubeDB Ops-manager Operator Updating Resources of StatefulSet: mg-sharding-shard1 - Normal UpdateMongosResources 2m50s KubeDB Ops-manager Operator Successfully Vertically Scaled Mongos Resources - Normal UpdateShardResources 29s KubeDB Ops-manager Operator Successfully Vertically Scaled Shard Resources - Normal ResumeDatabase 29s KubeDB Ops-manager Operator Resuming RabbitMQ demo/mg-sharding - Normal ResumeDatabase 29s KubeDB Ops-manager Operator Successfully resumed RabbitMQ demo/mg-sharding - Normal Successful 29s KubeDB Ops-manager Operator Successfully Vertically Scaled Database - Normal UpdateShardResources 28s KubeDB Ops-manager Operator Successfully Vertically Scaled Shard Resources -``` - -Now, we are going to verify from one of the Pod yaml whether the resources of the shard nodes has updated to meet up the desired state, Let's check, - -```bash -$ kubectl get pod -n demo mg-sharding-shard0-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "550m", - "memory": "1100Mi" - }, - "requests": { - "cpu": "550m", - "memory": "1100Mi" - } -} - -$ kubectl get pod -n demo mg-sharding-configsvr-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "550m", - "memory": "1100Mi" - }, - "requests": { - "cpu": "550m", - "memory": "1100Mi" - } -} - -$ kubectl get pod -n demo mg-sharding-mongos-0 -o json | jq '.spec.containers[].resources' -{ - "limits": { - "cpu": "550m", - "memory": 
"1100Mi" - }, - "requests": { - "cpu": "550m", - "memory": "1100Mi" - } -} -``` - -The above output verifies that we have successfully scaled the resources of all components of the RabbitMQ sharded database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-shard -kubectl delete RabbitMQopsrequest -n demo mops-vscale-shard -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/scaling/vertical-scaling/standalone.md b/docs/guides/rabbitmq/scaling/vertical-scaling/vertical-scaling.md similarity index 86% rename from docs/guides/rabbitmq/scaling/vertical-scaling/standalone.md rename to docs/guides/rabbitmq/scaling/vertical-scaling/vertical-scaling.md index c0c0e063c6..2e76b8ee24 100644 --- a/docs/guides/rabbitmq/scaling/vertical-scaling/standalone.md +++ b/docs/guides/rabbitmq/scaling/vertical-scaling/vertical-scaling.md @@ -1,10 +1,10 @@ --- -title: Vertical Scaling Standalone RabbitMQ +title: Vertical Scaling RabbitMQ menu: docs_{{ .version }}: - identifier: mg-vertical-scaling-standalone - name: Standalone - parent: mg-vertical-scaling + identifier: rm-vertical-scaling-ops + name: rabbitmq-vertical-scaling + parent: rm-vertical-scaling weight: 20 menu_name: docs_{{ .version }} section_menu_id: guides @@ -23,9 +23,9 @@ This guide will show you how to use `KubeDB` Ops-manager operator to update the - Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Vertical Scaling Overview](/docs/guides/RabbitMQ/scaling/vertical-scaling/overview.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) + - [Vertical Scaling Overview](/docs/guides/rabbitmq/scaling/vertical-scaling/overview.md) To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. @@ -34,7 +34,7 @@ $ kubectl create ns demo namespace/demo created ``` -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. +> **Note:** YAML files used in this tutorial are stored in [docs/examples/rabbitmq](/docs/examples/rabbitmq) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. ## Apply Vertical Scaling on Standalone @@ -42,7 +42,7 @@ Here, we are going to deploy a `RabbitMQ` standalone using a supported version ### Prepare RabbitMQ Standalone Database -Now, we are going to deploy a `RabbitMQ` standalone database with version `4.4.26`. +Now, we are going to deploy a `RabbitMQ` standalone database with version `3.13.2`. ### Deploy RabbitMQ standalone @@ -52,10 +52,10 @@ In this section, we are going to deploy a RabbitMQ standalone database. 
Then, in apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: mg-standalone + name: rm-standalone namespace: demo spec: - version: "4.4.26" + version: "3.13.2" storageType: Durable storage: storageClassName: "standard" @@ -69,8 +69,8 @@ spec: Let's create the `RabbitMQ` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/mg-standalone.yaml -RabbitMQ.kubedb.com/mg-standalone created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/scaling/mg-standalone.yaml +rabbitmq.kubedb.com/rm-standalone created ``` Now, wait until `mg-standalone` has status `Ready`. i.e, @@ -78,13 +78,13 @@ Now, wait until `mg-standalone` has status `Ready`. i.e, ```bash $ kubectl get mg -n demo NAME VERSION STATUS AGE -mg-standalone 4.4.26 Ready 5m56s +rm-standalone 3.13.2 Ready 5m56s ``` Let's check the Pod containers resources, ```bash -$ kubectl get pod -n demo mg-standalone-0 -o json | jq '.spec.containers[].resources' +$ kubectl get pod -n demo rm-standalone-0 -o json | jq '.spec.containers[].resources' { "limits": { "cpu": "500m", @@ -113,14 +113,14 @@ In order to update the resources of the database, we have to create a `RabbitMQO apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest metadata: - name: mops-vscale-standalone + name: rmops-vscale-standalone namespace: demo spec: type: VerticalScaling databaseRef: - name: mg-standalone + name: rm-standalone verticalScaling: - standalone: + node: resources: requests: memory: "2Gi" @@ -128,9 +128,6 @@ spec: limits: memory: "2Gi" cpu: "1" - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 timeout: 5m apply: IfReady ``` @@ -140,13 +137,13 @@ Here, - `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `mops-vscale-standalone` database. - `spec.type` specifies that we are performing `VerticalScaling` on our database. - `spec.VerticalScaling.standalone` specifies the desired resources after scaling. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. +- Have a look [here](/docs/guides/rabbitmq/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/scaling/vertical-scaling/mops-vscale-standalone.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-vscale-standalone created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/scaling/vertical-scaling/rmops-vscale-standalone.yaml +rabbitmqopsrequest.ops.kubedb.com/rmops-vscale-standalone created ``` #### Verify RabbitMQ Standalone resources updated successfully @@ -165,8 +162,8 @@ mops-vscale-standalone VerticalScaling Successful 108s We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to scale the database. 
```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-vscale-standalone -Name: mops-vscale-standalone +$ kubectl describe rabbitmqopsrequest -n demo rmops-vscale-standalone +Name: rmops-vscale-standalone Namespace: demo Labels: Annotations: @@ -281,7 +278,7 @@ Events: Now, we are going to verify from the Pod yaml whether the resources of the standalone database has updated to meet up the desired state, Let's check, ```bash -$ kubectl get pod -n demo mg-standalone-0 -o json | jq '.spec.containers[].resources' +$ kubectl get pod -n demo rm-standalone-0 -o json | jq '.spec.containers[].resources' { "limits": { "cpu": "1", @@ -301,6 +298,6 @@ The above output verifies that we have successfully scaled up the resources of t To clean up the Kubernetes resources created by this tutorial, run: ```bash -kubectl delete mg -n demo mg-standalone -kubectl delete RabbitMQopsrequest -n demo mops-vscale-standalone +kubectl delete rm -n demo rm-standalone +kubectl delete rabbitmqopsrequest -n demo rmops-vscale-standalone ``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/tls/_index.md b/docs/guides/rabbitmq/tls/_index.md index c4cd263b5b..4333af9839 100755 --- a/docs/guides/rabbitmq/tls/_index.md +++ b/docs/guides/rabbitmq/tls/_index.md @@ -2,9 +2,9 @@ title: Run RabbitMQ with TLS menu: docs_{{ .version }}: - identifier: mg-tls + identifier: rm-tls name: TLS/SSL Encryption - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 45 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/rabbitmq/tls/overview.md b/docs/guides/rabbitmq/tls/overview.md index d14677c2d7..70a3a50ddc 100644 --- a/docs/guides/rabbitmq/tls/overview.md +++ b/docs/guides/rabbitmq/tls/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ TLS/SSL Encryption Overview menu: docs_{{ .version }}: - identifier: mg-tls-overview + identifier: rm-tls-overview name: Overview - parent: mg-tls + parent: rm-tls weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -27,24 +27,18 @@ To issue a certificate, the following crd of `cert-manager` is used: KubeDB uses following crd fields to enable SSL/TLS encryption in `RabbitMQ`. - `spec:` - - `sslMode` - `tls:` - `issuerRef` - `certificates` - - `clusterAuthMode` -Read about the fields in details from [RabbitMQ concept](/docs/guides/RabbitMQ/concepts/RabbitMQ.md), + - `enableSSL` +Read about the fields in details from [RabbitMQ concept](/docs/guides/rabbitmq/concepts/rabbitmq.md), -When, `sslMode` is set to `requireSSL`, the users must specify the `tls.issuerRef` field. `KubeDB` uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets using `Issuer/ClusterIssuers` specification. These certificates secrets including `ca.crt`, `tls.crt` and `tls.key` etc. are used to configure `RabbitMQ` server, exporter etc. respectively. +When, `enableSSL` is set to `true`, the users must specify the `tls.issuerRef` field. `KubeDB` uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets using `Issuer/ClusterIssuers` specification. These certificates secrets including `ca.crt`, `tls.crt` and `tls.key` etc. are used to configure `RabbitMQ` server, exporter etc. respectively. ## How TLS/SSL configures in RabbitMQ The following figure shows how `KubeDB` enterprise used to configure TLS/SSL in RabbitMQ. Open the image in a new tab to see the enlarged version. -
-[figure: "Fig: Deploy RabbitMQ with TLS/SSL"]
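Taken together, the TLS-related fields line up under the RabbitMQ spec roughly as in the sketch below. This is a minimal illustration only; the issuer name is a placeholder and the `certificates` entry shows one assumed alias, so check the RabbitMQ concept doc for the authoritative field list.

```yaml
spec:
  enableSSL: true
  tls:
    issuerRef:
      apiGroup: cert-manager.io
      kind: Issuer
      name: rabbitmq-ca-issuer      # placeholder: an Issuer/ClusterIssuer created beforehand
    certificates:
      - alias: server               # assumed alias; add entries per certificate you want customized
        subject:
          organizations:
            - kubedb
```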
- Deploying RabbitMQ with TLS/SSL configuration process consists of the following steps: 1. At first, a user creates a `Issuer/ClusterIssuer` cr. diff --git a/docs/guides/rabbitmq/tls/replicaset.md b/docs/guides/rabbitmq/tls/replicaset.md deleted file mode 100644 index 9dcfd1a483..0000000000 --- a/docs/guides/rabbitmq/tls/replicaset.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: RabbitMQ ReplicaSet TLS/SSL Encryption -menu: - docs_{{ .version }}: - identifier: mg-tls-replicaset - name: Replicaset - parent: mg-tls - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Run RabbitMQ with TLS/SSL (Transport Encryption) - -KubeDB supports providing TLS/SSL encryption (via, `sslMode` and `clusterAuthMode`) for RabbitMQ. This tutorial will show you how to use KubeDB to run a RabbitMQ database with TLS/SSL encryption. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. - -- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). - -- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. - - ```bash - $ kubectl create ns demo - namespace/demo created - ``` - -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Overview - -KubeDB uses following crd fields to enable SSL/TLS encryption in RabbitMQ. - -- `spec:` - - `sslMode` - - `tls:` - - `issuerRef` - - `certificate` - - `clusterAuthMode` - -Read about the fields in details in [RabbitMQ concept](/docs/guides/RabbitMQ/concepts/RabbitMQ.md), - -`sslMode`, and `tls` is applicable for all types of RabbitMQ (i.e., `standalone`, `replicaset` and `sharding`), while `clusterAuthMode` provides [ClusterAuthMode](https://docs.RabbitMQ.com/manual/reference/program/mongod/#cmdoption-mongod-clusterauthmode) for RabbitMQ clusters (i.e., `replicaset` and `sharding`). - -When, SSLMode is anything other than `disabled`, users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `mongo.pem` and `client.pem`. - -The subject of `client.pem` certificate is added as `root` user in `$external` RabbitMQ database. So, user can use this client certificate for `RabbitMQ-X509` `authenticationMechanism`. - -## Create Issuer/ ClusterIssuer - -We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in RabbitMQ. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. - -- Start off by generating you ca certificates using openssl. 
- -```bash -openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=mongo/O=kubedb" -``` - -- Now create a ca-secret using the certificate files you have just generated. - -```bash -kubectl create secret tls mongo-ca \ - --cert=ca.crt \ - --key=ca.key \ - --namespace=demo -``` - -Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: - -```yaml -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: mongo-ca-issuer - namespace: demo -spec: - ca: - secretName: mongo-ca -``` - -Apply the `YAML` file: - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/issuer.yaml -issuer.cert-manager.io/mongo-ca-issuer created -``` - -## TLS/SSL encryption in RabbitMQ Replicaset - -Below is the YAML for RabbitMQ Replicaset. Here, [`spec.sslMode`](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#specsslMode) specifies `sslMode` for `replicaset` (which is `requireSSL`) and [`spec.clusterAuthMode`](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#specclusterAuthMode) provides `clusterAuthMode` for RabbitMQ replicaset nodes (which is `x509`). - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mgo-rs-tls - namespace: demo -spec: - version: "4.4.26" - sslMode: requireSSL - tls: - issuerRef: - apiGroup: "cert-manager.io" - kind: Issuer - name: mongo-ca-issuer - clusterAuthMode: x509 - replicas: 4 - replicaSet: - name: rs0 - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -``` - -### Deploy RabbitMQ Replicaset - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/mg-replicaset-ssl.yaml -RabbitMQ.kubedb.com/mgo-rs-tls created -``` - -Now, wait until `mgo-rs-tls created` has status `Ready`. i.e, - -```bash -$ watch kubectl get mg -n demo -Every 2.0s: kubectl get RabbitMQ -n demo -NAME VERSION STATUS AGE -mgo-rs-tls 4.4.26 Ready 4m10s -``` - -### Verify TLS/SSL in RabbitMQ Replicaset - -Now, connect to this database through [mongo-shell](https://docs.RabbitMQ.com/v4.0/mongo/) and verify if `SSLMode` and `ClusterAuthMode` has been set up as intended. 
- -```bash -$ kubectl describe secret -n demo mgo-rs-tls-client-cert -Name: mgo-rs-tls-client-cert -Namespace: demo -Labels: -Annotations: cert-manager.io/alt-names: - cert-manager.io/certificate-name: mgo-rs-tls-client-cert - cert-manager.io/common-name: root - cert-manager.io/ip-sans: - cert-manager.io/issuer-group: cert-manager.io - cert-manager.io/issuer-kind: Issuer - cert-manager.io/issuer-name: mongo-ca-issuer - cert-manager.io/uri-sans: - -Type: kubernetes.io/tls - -Data -==== -ca.crt: 1147 bytes -tls.crt: 1172 bytes -tls.key: 1679 bytes -``` - -Now, Let's exec into a RabbitMQ container and find out the username to connect in a mongo shell, - -```bash -$ kubectl exec -it mgo-rs-tls-0 -n demo bash -root@mgo-rs-tls-0:/$ ls /var/run/RabbitMQ/tls -ca.crt client.pem mongo.pem -root@mgo-rs-tls-0:/$ openssl x509 -in /var/run/RabbitMQ/tls/client.pem -inform PEM -subject -nameopt RFC2253 -noout -subject=CN=root,O=kubedb -``` - -Now, we can connect using `CN=root,O=kubedb` as root to connect to the mongo shell, - -```bash -root@mgo-rs-tls-0:/$ mongo --tls --tlsCAFile /var/run/RabbitMQ/tls/ca.crt --tlsCertificateKeyFile /var/run/RabbitMQ/tls/client.pem admin --host localhost --authenticationMechanism RabbitMQ-X509 --authenticationDatabase='$external' -u "CN=root,O=kubedb" --quiet -Welcome to the RabbitMQ shell. -For interactive help, type "help". -For more comprehensive documentation, see - http://docs.RabbitMQ.org/ -Questions? Try the support group - http://groups.google.com/group/RabbitMQ-user -rs0:PRIMARY> -``` - -We are connected to the mongo shell. Let's run some command to verify the sslMode and the user, - -```bash -rs0:PRIMARY> db.adminCommand({ getParameter:1, sslMode:1 }) -{ - "sslMode" : "requireSSL", - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1599490676, 1), - "signature" : { - "hash" : BinData(0,"/wQ4pf4HVi1T7SOyaB3pXO56j64="), - "keyId" : NumberLong("6869759546676477954") - } - }, - "operationTime" : Timestamp(1599490676, 1) -} - -rs0:PRIMARY> use $external -switched to db $external - -rs0:PRIMARY> show users -{ - "_id" : "$external.CN=root,O=kubedb", - "userId" : UUID("9cebbcf4-74bf-47dd-a485-1604125058da"), - "user" : "CN=root,O=kubedb", - "db" : "$external", - "roles" : [ - { - "role" : "root", - "db" : "admin" - } - ], - "mechanisms" : [ - "external" - ] -} -> exit -bye -``` - -You can see here that, `sslMode` is set to `requireSSL` and a user is created in `$external` with name `"CN=root,O=kubedb"`. - -## Changing the SSLMode & ClusterAuthMode - -User can update `sslMode` & `ClusterAuthMode` if needed. Some changes may be invalid from RabbitMQ end, like using `sslMode: disabled` with `clusterAuthMode: x509`. - -The good thing is, **KubeDB operator will throw error for invalid SSL specs while creating/updating the RabbitMQ object.** i.e., - -```bash -$ kubectl patch -n demo mg/mgo-rs-tls -p '{"spec":{"sslMode": "disabled","clusterAuthMode": "x509"}}' --type="merge" -Error from server (Forbidden): admission webhook "RabbitMQ.validators.kubedb.com" denied the request: can't have disabled set to RabbitMQ.spec.sslMode when RabbitMQ.spec.clusterAuthMode is set to x509 -``` - -To **update from Keyfile Authentication to x.509 Authentication**, change the `sslMode` and `clusterAuthMode` in recommended sequence as suggested in [official documentation](https://docs.RabbitMQ.com/manual/tutorial/update-keyfile-to-x509/). 
Each time after changing the specs, follow the procedure that is described above to verify the changes of `sslMode` and `clusterAuthMode` inside the database. - -## Cleaning up - -To cleanup the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete RabbitMQ -n demo mgo-rs-tls -kubectl delete issuer -n demo mongo-ca-issuer -kubectl delete ns demo -``` - -## Next Steps - -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- [Backup and Restore](/docs/guides/RabbitMQ/backup/overview/index.md) RabbitMQ databases using Stash. -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/tls/sharding.md b/docs/guides/rabbitmq/tls/sharding.md deleted file mode 100644 index 90cdcd082e..0000000000 --- a/docs/guides/rabbitmq/tls/sharding.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: RabbitMQ Shard TLS/SSL Encryption -menu: - docs_{{ .version }}: - identifier: mg-tls-shard - name: Sharding - parent: mg-tls - weight: 40 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Run RabbitMQ with TLS/SSL (Transport Encryption) - -KubeDB supports providing TLS/SSL encryption (via, `sslMode` and `clusterAuthMode`) for RabbitMQ. This tutorial will show you how to use KubeDB to run a RabbitMQ database with TLS/SSL encryption. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. - -- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). - -- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. - - ```bash - $ kubectl create ns demo - namespace/demo created - ``` - -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Overview - -KubeDB uses following crd fields to enable SSL/TLS encryption in RabbitMQ. 
- -- `spec:` - - `sslMode` - - `tls:` - - `issuerRef` - - `certificate` - - `clusterAuthMode` - -Read about the fields in details in [RabbitMQ concept](/docs/guides/RabbitMQ/concepts/RabbitMQ.md), - -`sslMode`, and `tls` is applicable for all types of RabbitMQ (i.e., `standalone`, `replicaset` and `sharding`), while `clusterAuthMode` provides [ClusterAuthMode](https://docs.RabbitMQ.com/manual/reference/program/mongod/#cmdoption-mongod-clusterauthmode) for RabbitMQ clusters (i.e., `replicaset` and `sharding`). - -When, SSLMode is anything other than `disabled`, users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `mongo.pem` and `client.pem`. - -The subject of `client.pem` certificate is added as `root` user in `$external` RabbitMQ database. So, user can use this client certificate for `RabbitMQ-X509` `authenticationMechanism`. - -## Create Issuer/ ClusterIssuer - -We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in RabbitMQ. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. - -- Start off by generating you ca certificates using openssl. - -```bash -openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=mongo/O=kubedb" -``` - -- Now create a ca-secret using the certificate files you have just generated. - -```bash -kubectl create secret tls mongo-ca \ - --cert=ca.crt \ - --key=ca.key \ - --namespace=demo -``` - -Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: - -```yaml -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: mongo-ca-issuer - namespace: demo -spec: - ca: - secretName: mongo-ca -``` - -Apply the `YAML` file: - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/issuer.yaml -issuer.cert-manager.io/mongo-ca-issuer created -``` - -## TLS/SSL encryption in RabbitMQ Sharding - -Below is the YAML for RabbitMQ Sharding. Here, [`spec.sslMode`](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#specsslMode) specifies `sslMode` for `sharding` and [`spec.clusterAuthMode`](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#specclusterAuthMode) provides `clusterAuthMode` for sharding servers. - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mongo-sh-tls - namespace: demo -spec: - version: "4.4.26" - sslMode: requireSSL - tls: - issuerRef: - apiGroup: "cert-manager.io" - kind: Issuer - name: mongo-ca-issuer - clusterAuthMode: x509 - shardTopology: - configServer: - replicas: 2 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - mongos: - replicas: 2 - shard: - replicas: 2 - shards: 2 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - storageType: Durable - terminationPolicy: WipeOut -``` - -### Deploy RabbitMQ Sharding - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/mg-shard-ssl.yaml -RabbitMQ.kubedb.com/mongo-sh-tls created -``` - -Now, wait until `mongo-sh-tls created` has status `Ready`. 
ie, - -```bash -$ watch kubectl get mg -n demo -Every 2.0s: kubectl get RabbitMQ -n demo -NAME VERSION STATUS AGE -mongo-sh-tls 4.4.26 Ready 4m24s -``` - -### Verify TLS/SSL in RabbitMQ Sharding - -Now, connect to `mongos` component of this database through [mongo-shell](https://docs.RabbitMQ.com/v4.0/mongo/) and verify if `SSLMode` and `ClusterAuthMode` has been set up as intended. - -```bash -$ kubectl describe secret -n demo mongo-sh-tls-client-cert -Name: mongo-sh-tls-client-cert -Namespace: demo -Labels: -Annotations: cert-manager.io/alt-names: - cert-manager.io/certificate-name: mongo-sh-tls-client-cert - cert-manager.io/common-name: root - cert-manager.io/ip-sans: - cert-manager.io/issuer-group: cert-manager.io - cert-manager.io/issuer-kind: Issuer - cert-manager.io/issuer-name: mongo-ca-issuer - cert-manager.io/uri-sans: - -Type: kubernetes.io/tls - -Data -==== -ca.crt: 1147 bytes -tls.crt: 1172 bytes -tls.key: 1679 bytes -``` - -Now, Let's exec into a RabbitMQ container and find out the username to connect in a mongo shell, - -```bash -$ kubectl exec -it mongo-sh-tls-mongos-0 -n demo bash -root@mongo-sh-tls-mongos-0:/$ ls /var/run/RabbitMQ/tls -ca.crt client.pem mongo.pem -RabbitMQ@mgo-sh-tls-mongos-0:/$ openssl x509 -in /var/run/RabbitMQ/tls/client.pem -inform PEM -subject -nameopt RFC2253 -noout -subject=CN=root,O=kubedb -``` - -Now, we can connect using `CN=root,O=kubedb` as root to connect to the mongo shell, - -```bash -root@mongo-sh-tls-mongos-0:/# mongo --tls --tlsCAFile /var/run/RabbitMQ/tls/ca.crt --tlsCertificateKeyFile /var/run/RabbitMQ/tls/client.pem admin --host localhost --authenticationMechanism RabbitMQ-X509 --authenticationDatabase='$external' -u "CN=root,O=kubedb" --quiet -Welcome to the RabbitMQ shell. -For interactive help, type "help". -For more comprehensive documentation, see - http://docs.RabbitMQ.org/ -Questions? Try the support group - http://groups.google.com/group/RabbitMQ-user -mongos> -``` - -We are connected to the mongo shell. Let's run some command to verify the sslMode and the user, - -```bash -mongos> db.adminCommand({ getParameter:1, sslMode:1 }) -{ - "sslMode" : "requireSSL", - "ok" : 1, - "operationTime" : Timestamp(1599491398, 1), - "$clusterTime" : { - "clusterTime" : Timestamp(1599491398, 1), - "signature" : { - "hash" : BinData(0,"cn2Mhfy2blonon3jPz6Daen0nnc="), - "keyId" : NumberLong("6869760899591176209") - } - } -} -mongos> use $external -switched to db $external -mongos> show users -{ - "_id" : "$external.CN=root,O=kubedb", - "userId" : UUID("4865dda6-5e31-4b79-a085-7d6fea51c9be"), - "user" : "CN=root,O=kubedb", - "db" : "$external", - "roles" : [ - { - "role" : "root", - "db" : "admin" - } - ], - "mechanisms" : [ - "external" - ] -} -> exit -bye -``` - -You can see here that, `sslMode` is set to `requireSSL` and `clusterAuthMode` is set to `x509` and also an user is created in `$external` with name `"CN=root,O=kubedb"`. - -## Changing the SSLMode & ClusterAuthMode - -User can update `sslMode` & `ClusterAuthMode` if needed. Some changes may be invalid from RabbitMQ end, like using `sslMode: disabled` with `clusterAuthMode: x509`. 
- -The good thing is, **KubeDB operator will throw error for invalid SSL specs while creating/updating the RabbitMQ object.** i.e., - -```bash -$ kubectl patch -n demo mg/mgo-sh-tls -p '{"spec":{"sslMode": "disabled","clusterAuthMode": "x509"}}' --type="merge" -Error from server (Forbidden): admission webhook "RabbitMQ.validators.kubedb.com" denied the request: can't have disabled set to RabbitMQ.spec.sslMode when RabbitMQ.spec.clusterAuthMode is set to x509 -``` - -To **update from Keyfile Authentication to x.509 Authentication**, change the `sslMode` and `clusterAuthMode` in recommended sequence as suggested in [official documentation](https://docs.RabbitMQ.com/manual/tutorial/update-keyfile-to-x509/). Each time after changing the specs, follow the procedure that is described above to verify the changes of `sslMode` and `clusterAuthMode` inside the database. - -## Cleaning up - -To cleanup the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete RabbitMQ -n demo mongo-sh-tls -kubectl delete issuer -n demo mongo-ca-issuer -kubectl delete ns demo -``` - -## Next Steps - -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- [Backup and Restore](/docs/guides/RabbitMQ/backup/overview/index.md) RabbitMQ databases using Stash. -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/tls/standalone.md b/docs/guides/rabbitmq/tls/standalone.md deleted file mode 100644 index c23b6f48ef..0000000000 --- a/docs/guides/rabbitmq/tls/standalone.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -title: RabbitMQ Standalone TLS/SSL Encryption -menu: - docs_{{ .version }}: - identifier: mg-tls-standalone - name: Standalone - parent: mg-tls - weight: 20 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Run RabbitMQ with TLS/SSL (Transport Encryption) - -KubeDB supports providing TLS/SSL encryption (via, `sslMode` and `clusterAuthMode`) for RabbitMQ. This tutorial will show you how to use KubeDB to run a RabbitMQ database with TLS/SSL encryption. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. - -- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). - -- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
- - ```bash - $ kubectl create ns demo - namespace/demo created - ``` - -> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Overview - -KubeDB uses following crd fields to enable SSL/TLS encryption in RabbitMQ. - -- `spec:` - - `sslMode` - - `tls:` - - `issuerRef` - - `certificate` - - `clusterAuthMode` - -Read about the fields in details in [RabbitMQ concept](/docs/guides/RabbitMQ/concepts/RabbitMQ.md), - -`sslMode`, and `tls` is applicable for all types of RabbitMQ (i.e., `standalone`, `replicaset` and `sharding`), while `clusterAuthMode` provides [ClusterAuthMode](https://docs.RabbitMQ.com/manual/reference/program/mongod/#cmdoption-mongod-clusterauthmode) for RabbitMQ clusters (i.e., `replicaset` and `sharding`). - -When, SSLMode is anything other than `disabled`, users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `mongo.pem` and `client.pem`. - -The subject of `client.pem` certificate is added as `root` user in `$external` RabbitMQ database. So, user can use this client certificate for `RabbitMQ-X509` `authenticationMechanism`. - -## Create Issuer/ ClusterIssuer - -We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in RabbitMQ. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. - -- Start off by generating you ca certificates using openssl. - -```bash -openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=mongo/O=kubedb" -``` - -- Now create a ca-secret using the certificate files you have just generated. - -```bash -kubectl create secret tls mongo-ca \ - --cert=ca.crt \ - --key=ca.key \ - --namespace=demo -``` - -Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: - -```yaml -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: mongo-ca-issuer - namespace: demo -spec: - ca: - secretName: mongo-ca -``` - -Apply the `YAML` file: - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/issuer.yaml -issuer.cert-manager.io/mongo-ca-issuer created -``` - -## TLS/SSL encryption in RabbitMQ Standalone - -Below is the YAML for RabbitMQ Standalone. Here, [`spec.sslMode`](/docs/guides/RabbitMQ/concepts/RabbitMQ.md#specsslMode) specifies `sslMode` for `standalone` (which is `requireSSL`). - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mgo-tls - namespace: demo -spec: - version: "4.4.26" - sslMode: requireSSL - tls: - issuerRef: - apiGroup: "cert-manager.io" - kind: Issuer - name: mongo-ca-issuer - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -``` - -### Deploy RabbitMQ Standalone - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/mg-standalone-ssl.yaml -RabbitMQ.kubedb.com/mgo-tls created -``` - -Now, wait until `mgo-tls created` has status `Ready`. 
i.e, - -```bash -$ watch kubectl get mg -n demo -Every 2.0s: kubectl get RabbitMQ -n demo -NAME VERSION STATUS AGE -mgo-tls 4.4.26 Ready 14s -``` - -### Verify TLS/SSL in RabbitMQ Standalone - -Now, connect to this database through [mongo-shell](https://docs.RabbitMQ.com/v4.0/mongo/) and verify if `SSLMode` has been set up as intended (i.e, `requireSSL`). - -```bash -$ kubectl describe secret -n demo mgo-tls-client-cert -Name: mgo-tls-client-cert -Namespace: demo -Labels: -Annotations: cert-manager.io/alt-names: - cert-manager.io/certificate-name: mgo-tls-client-cert - cert-manager.io/common-name: root - cert-manager.io/ip-sans: - cert-manager.io/issuer-group: cert-manager.io - cert-manager.io/issuer-kind: Issuer - cert-manager.io/issuer-name: mongo-ca-issuer - cert-manager.io/uri-sans: - -Type: kubernetes.io/tls - -Data -==== -tls.crt: 1172 bytes -tls.key: 1679 bytes -ca.crt: 1147 bytes -``` - -Now, Let's exec into a RabbitMQ container and find out the username to connect in a mongo shell, - -```bash -$ kubectl exec -it mgo-tls-0 -n demo bash -RabbitMQ@mgo-tls-0:/$ ls /var/run/RabbitMQ/tls -ca.crt client.pem mongo.pem -RabbitMQ@mgo-tls-0:/$ openssl x509 -in /var/run/RabbitMQ/tls/client.pem -inform PEM -subject -nameopt RFC2253 -noout -subject=CN=root,O=kubedb -``` - -Now, we can connect using `CN=root,O=kubedb` as root to connect to the mongo shell, - -```bash -RabbitMQ@mgo-tls-0:/$ mongo --tls --tlsCAFile /var/run/RabbitMQ/tls/ca.crt --tlsCertificateKeyFile /var/run/RabbitMQ/tls/client.pem admin --host localhost --authenticationMechanism RabbitMQ-X509 --authenticationDatabase='$external' -u "CN=root,O=kubedb" --quiet -> -``` - -We are connected to the mongo shell. Let's run some command to verify the sslMode and the user, - -```bash -> db.adminCommand({ getParameter:1, sslMode:1 }) -{ "sslMode" : "requireSSL", "ok" : 1 } - -> use $external -switched to db $external - -> show users -{ - "_id" : "$external.CN=root,O=kubedb", - "userId" : UUID("d2ddf121-9398-400b-b477-0e8bcdd47746"), - "user" : "CN=root,O=kubedb", - "db" : "$external", - "roles" : [ - { - "role" : "root", - "db" : "admin" - } - ], - "mechanisms" : [ - "external" - ] -} -> exit -bye -``` - -You can see here that, `sslMode` is set to `requireSSL` and a user is created in `$external` with name `"CN=root,O=kubedb"`. - -## Changing the SSLMode & ClusterAuthMode - -User can update `sslMode` & `ClusterAuthMode` if needed. Some changes may be invalid from RabbitMQ end, like using `sslMode: disabled` with `clusterAuthMode: x509`. - -The good thing is, **KubeDB operator will throw error for invalid SSL specs while creating/updating the RabbitMQ object.** i.e., - -```bash -$ kubectl patch -n demo mg/mgo-tls -p '{"spec":{"sslMode": "disabled","clusterAuthMode": "x509"}}' --type="merge" -Error from server (Forbidden): admission webhook "RabbitMQ.validators.kubedb.com" denied the request: can't have disabled set to RabbitMQ.spec.sslMode when RabbitMQ.spec.clusterAuthMode is set to x509 -``` - -To **update from Keyfile Authentication to x.509 Authentication**, change the `sslMode` and `clusterAuthMode` in recommended sequence as suggested in [official documentation](https://docs.RabbitMQ.com/manual/tutorial/update-keyfile-to-x509/). Each time after changing the specs, follow the procedure that is described above to verify the changes of `sslMode` and `clusterAuthMode` inside the database. 
- -## Cleaning up - -To cleanup the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete RabbitMQ -n demo mgo-tls -kubectl delete issuer -n demo mongo-ca-issuer -kubectl delete ns demo -``` - -## Next Steps - -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- [Backup and Restore](/docs/guides/RabbitMQ/backup/overview/index.md) RabbitMQ databases using Stash. -- Initialize [RabbitMQ with Script](/docs/guides/RabbitMQ/initialization/using-script.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). -- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/RabbitMQ/monitoring/using-builtin-prometheus.md). -- Use [private Docker registry](/docs/guides/RabbitMQ/private-registry/using-private-registry.md) to deploy RabbitMQ with KubeDB. -- Use [kubedb cli](/docs/guides/RabbitMQ/cli/cli.md) to manage databases like kubectl for Kubernetes. -- Detail concepts of [RabbitMQ object](/docs/guides/RabbitMQ/concepts/RabbitMQ.md). -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/rabbitmq/tls/tls.md b/docs/guides/rabbitmq/tls/tls.md new file mode 100644 index 0000000000..6a7f764036 --- /dev/null +++ b/docs/guides/rabbitmq/tls/tls.md @@ -0,0 +1,147 @@ +--- +title: RabbitMQ TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: rm-tls-describe + name: rabbitmq-tls + parent: mg-tls + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run RabbitMQ with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption (via, `.spec.enableSSL`) for RabbitMQ. This tutorial will show you how to use KubeDB to run a RabbitMQ database with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in RabbitMQ. + +- `spec:` + - `tls:` + - `issuerRef` + - `certificate` + - `enableSSL` + +Read about the fields in details in [RabbitMQ concept](/docs/guides/rabbitmq/concepts/rabbitmq.md), + +When, SSLMode is anything other than `disabled`, users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. 
These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt` and `tls.key`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in RabbitMQ. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=rabbitmq/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls rabbitmq-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: rabbitmq-ca-issuer + namespace: demo +spec: + ca: + secretName: rabbitmq-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/tls/issuer.yaml +issuer.cert-manager.io/rabbitmq-ca-issuer created +``` + +## TLS/SSL encryption in RabbitMQ Standalone + +Below is the YAML for RabbitMQ Standalone. Here, [`spec.sslMode`](/docs/guides/rabbitmq/concepts/rabbitmq.md#spectls) specifies tls configurations required for operator to create corresponding resources. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: RabbitMQ +metadata: + name: rabbitmq-tls + namespace: demo +spec: + version: "3.13.2" + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: rabbitmq-ca-issuer + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +### Deploy RabbitMQ Standalone + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/tls/rm-standalone-ssl.yaml +rabbitmq.kubedb.com/rabbitmq-tls created +``` + +Now, wait until `rabbitmq-tls created` has status `Ready`. i.e, + +```bash +$ watch kubectl get rm -n demo +Every 2.0s: kubectl get rm -n demo +NAME VERSION STATUS AGE +rabbitmq-tls 3.13.2 Ready 14s +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete rabbitmq -n demo rabbitmq-tls +kubectl delete issuer -n demo rabbitmq-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +(/docs/guides/RabbitMQ/monitoring/using-prometheus-operator.md). +- Monitor your RabbitMQ database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/rabbitmq/monitoring/using-builtin-prometheus.md). +- Detail concepts of [RabbitMQ object](/docs/guides/rabbitmq/concepts/rabbitmq.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
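Once `rabbitmq-tls` reports `Ready`, the issued certificates can be checked directly. The secret and pod names below follow the usual `<name>-...` conventions and are assumptions rather than guaranteed names, and the `openssl` step assumes the server image ships the `openssl` binary.

```bash
# Cert-manager backed secrets created for rabbitmq-tls (names are assumed; verify with the first command)
$ kubectl get secrets -n demo | grep rabbitmq-tls

# Each certificate secret should carry ca.crt, tls.crt and tls.key
$ kubectl describe secret -n demo rabbitmq-tls-client-cert

# AMQPS listens on port 5671 by default; confirm the listener presents the issued certificate
$ kubectl exec -it -n demo rabbitmq-tls-0 -- openssl s_client -connect localhost:5671 -brief
```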
diff --git a/docs/guides/rabbitmq/update-version/overview.md b/docs/guides/rabbitmq/update-version/overview.md index 4b29c0568f..0844dfd8e8 100644 --- a/docs/guides/rabbitmq/update-version/overview.md +++ b/docs/guides/rabbitmq/update-version/overview.md @@ -19,25 +19,20 @@ This guide will give you an overview on how KubeDB Ops-manager operator update t ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How update version Process Works The following diagram shows how KubeDB Ops-manager operator used to update the version of `RabbitMQ`. Open the image in a new tab to see the enlarged version. -
-Fig: updating Process of RabbitMQ
- The updating process consists of the following steps: 1. At first, a user creates a `RabbitMQ` Custom Resource (CR). 2. `KubeDB` Provisioner operator watches the `RabbitMQ` CR. -3. When the operator finds a `RabbitMQ` CR, it creates required number of `PetSets` and other kubernative native resources like secrets, services, etc. +3. When the operator finds a `RabbitMQ` CR, it creates required number of `PetSets` and other kubernetes native resources like secrets, services, etc. 4. Then, in order to update the version of the `RabbitMQ` database the user creates a `RabbitMQOpsRequest` CR with the desired version. @@ -51,4 +46,4 @@ The updating process consists of the following steps: 9. After successfully updating of `RabbitMQ` object, the `KubeDB` Ops-manager operator resumes the `RabbitMQ` object so that the `KubeDB` Provisioner operator can resume its usual operations. -In the next doc, we are going to show a step by step guide on updating of a RabbitMQ database using updateVersion operation. \ No newline at end of file +In the next doc, we are going to show a step-by-step guide on updating of a RabbitMQ database using updateVersion operation. \ No newline at end of file diff --git a/docs/guides/rabbitmq/update-version/cluster.md b/docs/guides/rabbitmq/update-version/update-version.md similarity index 78% rename from docs/guides/rabbitmq/update-version/cluster.md rename to docs/guides/rabbitmq/update-version/update-version.md index 915c87667f..3015497df9 100644 --- a/docs/guides/rabbitmq/update-version/cluster.md +++ b/docs/guides/rabbitmq/update-version/update-version.md @@ -12,9 +12,9 @@ section_menu_id: guides > New to KubeDB? Please start [here](/docs/README.md). -# update version of RabbitMQ ReplicaSet +# update version of RabbitMQ Cluster -This guide will show you how to use `KubeDB` Ops-manager operator to update the version of `RabbitMQ` ReplicaSet. +This guide will show you how to use `KubeDB` Ops-manager operator to update the version of `RabbitMQ` Cluster. ## Before You Begin @@ -25,7 +25,7 @@ This guide will show you how to use `KubeDB` Ops-manager operator to update the - You should be familiar with the following `KubeDB` concepts: - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) - - [Updating Overview](/docs/guides/rabbitmq/concepts/update-version/overview.md) + - [Updating Overview](/docs/guides/rabbitmq/update-version/overview.md) To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. @@ -34,13 +34,13 @@ $ kubectl create ns demo namespace/demo created ``` -> **Note:** YAML files used in this tutorial are stored in [docs/examples/RabbitMQ](/docs/examples/RabbitMQ) directory of [kubedb/docs](https://github.com/kube/docs) repository. +> **Note:** YAML files used in this tutorial are stored in [docs/examples/rabbitmq](/docs/examples/rabbitmq) directory of [kubedb/docs](https://github.com/kube/docs) repository. -## Prepare RabbitMQ ReplicaSet Database +## Prepare RabbitMQ cluster -Now, we are going to deploy a `RabbitMQ` replicaset database with version `3.6.8`. +Now, we are going to deploy a `RabbitMQ` cluster with version `3.12.12`. -### Deploy RabbitMQ replicaset +### Deploy RabbitMQ In this section, we are going to deploy a RabbitMQ replicaset database. Then, in the next section we will update the version of the database using `RabbitMQOpsRequest` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, @@ -48,12 +48,10 @@ In this section, we are going to deploy a RabbitMQ replicaset database. Then, in apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: mg-replicaset + name: rm-cluster namespace: demo spec: - version: "4.4.26" - replicaSet: - name: "replicaset" + version: "3.12.12" replicas: 3 storageType: Durable storage: @@ -68,23 +66,23 @@ spec: Let's create the `RabbitMQ` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/update-version/mg-replicaset.yaml -RabbitMQ.kubedb.com/mg-replicaset created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/update-version/rm-replicaset.yaml +rabbitmq.kubedb.com/rm-cluster created ``` -Now, wait until `mg-replicaset` created has status `Ready`. i.e, +Now, wait until `rm-cluster` created has status `Ready`. i.e, ```bash -$ k get RabbitMQ -n demo +$ kubectl get rm -n demo NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 109s +rm-cluster 3.12.12 Ready 109s ``` We are now ready to apply the `RabbitMQOpsRequest` CR to update this database. ### update RabbitMQ Version -Here, we are going to update `RabbitMQ` replicaset from `3.6.8` to `4.0.5`. +Here, we are going to update `RabbitMQ` replicaset from `3.12.12` to `3.13.2`. #### Create RabbitMQOpsRequest: @@ -94,17 +92,14 @@ In order to update the version of the replicaset database, we have to create a ` apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest metadata: - name: mops-replicaset-update + name: rm-cluster-update namespace: demo spec: type: UpdateVersion databaseRef: - name: mg-replicaset + name: rm-cluster updateVersion: - targetVersion: 4.4.26 - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 + targetVersion: 3.13.2 timeout: 5m apply: IfReady ``` @@ -113,34 +108,34 @@ Here, - `spec.databaseRef.name` specifies that we are performing operation on `mg-replicaset` RabbitMQ database. - `spec.type` specifies that we are going to perform `UpdateVersion` on our database. -- `spec.updateVersion.targetVersion` specifies the expected version of the database `4.0.5`. -- Have a look [here](/docs/guides/RabbitMQ/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. +- `spec.updateVersion.targetVersion` specifies the expected version of the database `3.13.2`. +- Have a look [here](/docs/guides/rabbitmq/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/update-version/mops-update-replicaset .yaml -RabbitMQopsrequest.ops.kubedb.com/mops-replicaset-update created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/update-version/rmops-update-replicaset .yaml +rabbitmqopsrequest.ops.kubedb.com/rmops-replicaset-update created ``` #### Verify RabbitMQ version updated successfully -If everything goes well, `KubeDB` Ops-manager operator will update the image of `RabbitMQ` object and related `StatefulSets` and `Pods`. +If everything goes well, `KubeDB` Ops-manager operator will update the image of `RabbitMQ` object and related `PetSets` and `Pods`. 
Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, ```bash -$ kubectl get RabbitMQopsrequest -n demo -Every 2.0s: kubectl get RabbitMQopsrequest -n demo +$ kubectl get rabbitmqopsrequest -n demo +Every 2.0s: kubectl get rabbitmqopsrequest -n demo NAME TYPE STATUS AGE -mops-replicaset-update UpdateVersion Successful 84s +rmops-replicaset-update UpdateVersion Successful 84s ``` We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to update the database version. ```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-replicaset-update -Name: mops-replicaset-update +$ kubectl describe rabbitmqopsrequest -n demo rmops-replicaset-update +Name: rmops-replicaset-update Namespace: demo Labels: Annotations: @@ -190,7 +185,7 @@ Metadata: Spec: Apply: IfReady Database Ref: - Name: mg-replicaset + Name: rm-replicaset Readiness Criteria: Objects Count Diff Percentage: 10 Oplog Max Lag Seconds: 20 @@ -239,17 +234,17 @@ Events: Normal Successful 38s KubeDB Ops-manager Operator Successfully Updated Database ``` -Now, we are going to verify whether the `RabbitMQ` and the related `StatefulSets` and their `Pods` have the new version image. Let's check, +Now, we are going to verify whether the `RabbitMQ` and the related `PetSets` and their `Pods` have the new version image. Let's check, ```bash -$ kubectl get mg -n demo mg-replicaset -o=jsonpath='{.spec.version}{"\n"}' -4.4.26 +$ kubectl get rm -n demo rm-replicaset -o=jsonpath='{.spec.version}{"\n"}' +3.13.2 -$ kubectl get sts -n demo mg-replicaset -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}' -mongo:4.0.5 +$ kubectl get petset -n demo rm-replicaset -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}' +ghcr.io/appscode-images/rabbitmq:3.13.2-management-alpine -$ kubectl get pods -n demo mg-replicaset-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}' -mongo:4.0.5 +$ kubectl get pods -n demo rm-replicaset-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}' +ghcr.io/appscode-images/rabbitmq:3.13.2-management-alpine ``` You can see from above, our `RabbitMQ` replicaset database has been updated with the new version. So, the updateVersion process is successfully completed. 
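As an extra sanity check, you can also ask the broker itself for its running version using the diagnostics tooling that ships inside the RabbitMQ container. The pod name below assumes the first replica of the cluster we deployed (`rm-cluster`); adjust it to whatever `kubectl get pods -n demo` shows in your environment.

```bash
# Pod name is illustrative; list the pods in the demo namespace first
kubectl exec -n demo rm-cluster-0 -- rabbitmq-diagnostics server_version
```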
@@ -259,6 +254,6 @@ You can see from above, our `RabbitMQ` replicaset database has been updated with To clean up the Kubernetes resources created by this tutorial, run: ```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete RabbitMQopsrequest -n demo mops-replicaset-update +kubectl delete rm -n demo rm-replicaset +kubectl delete rabbitmqopsrequest -n demo rmops-replicaset-update ``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/volume-expansion/_index.md b/docs/guides/rabbitmq/volume-expansion/_index.md index e4cce90e11..d42bc467d8 100644 --- a/docs/guides/rabbitmq/volume-expansion/_index.md +++ b/docs/guides/rabbitmq/volume-expansion/_index.md @@ -2,9 +2,9 @@ title: Volume Expansion menu: docs_{{ .version }}: - identifier: mg-volume-expansion + identifier: rm-volume-expansion name: Volume Expansion - parent: mg-RabbitMQ-guides + parent: rm-guides weight: 44 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/rabbitmq/volume-expansion/overview.md b/docs/guides/rabbitmq/volume-expansion/overview.md index 3bb7c5c43c..b98295a611 100644 --- a/docs/guides/rabbitmq/volume-expansion/overview.md +++ b/docs/guides/rabbitmq/volume-expansion/overview.md @@ -2,9 +2,9 @@ title: RabbitMQ Volume Expansion Overview menu: docs_{{ .version }}: - identifier: mg-volume-expansion-overview + identifier: rm-volume-expansion-overview name: Overview - parent: mg-volume-expansion + parent: rm-volume-expansion weight: 10 menu_name: docs_{{ .version }} section_menu_id: guides @@ -14,23 +14,18 @@ section_menu_id: guides # RabbitMQ Volume Expansion -This guide will give an overview on how KubeDB Ops-manager operator expand the volume of various component of `RabbitMQ` such as ReplicaSet, Shard, ConfigServer, Mongos, etc. +This guide will give an overview on how KubeDB Ops-manager operator expand the volume of `RabbitMQ` cluster nodes. ## Before You Begin - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) ## How Volume Expansion Process Works The following diagram shows how KubeDB Ops-manager operator expand the volumes of `RabbitMQ` database components. Open the image in a new tab to see the enlarged version. -
-Fig: Volume Expansion process of RabbitMQ
- The Volume Expansion process consists of the following steps: 1. At first, a user creates a `RabbitMQ` Custom Resource (CR). @@ -39,9 +34,9 @@ The Volume Expansion process consists of the following steps: 3. When the operator finds a `RabbitMQ` CR, it creates required number of `StatefulSets` and related necessary stuff like secrets, services, etc. -4. Each StatefulSet creates a Persistent Volume according to the Volume Claim Template provided in the statefulset configuration. This Persistent Volume will be expanded by the `KubeDB` Ops-manager operator. +4. Each StatefulSet creates a Persistent Volume according to the Volume Claim Template provided in the PetSet configuration. This Persistent Volume will be expanded by the `KubeDB` Ops-manager operator. -5. Then, in order to expand the volume of the various components (ie. ReplicaSet, Shard, ConfigServer, Mongos, etc.) of the `RabbitMQ` database the user creates a `RabbitMQOpsRequest` CR with desired information. +5. Then, in order to expand the volume the `RabbitMQ` database the user creates a `RabbitMQOpsRequest` CR with desired information. 6. `KubeDB` Ops-manager operator watches the `RabbitMQOpsRequest` CR. diff --git a/docs/guides/rabbitmq/volume-expansion/replicaset.md b/docs/guides/rabbitmq/volume-expansion/replicaset.md deleted file mode 100644 index 64cf723751..0000000000 --- a/docs/guides/rabbitmq/volume-expansion/replicaset.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: RabbitMQ Replicaset Volume Expansion -menu: - docs_{{ .version }}: - identifier: mg-volume-expansion-replicaset - name: Replicaset - parent: mg-volume-expansion - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# RabbitMQ Replicaset Volume Expansion - -This guide will show you how to use `KubeDB` Ops-manager operator to expand the volume of a RabbitMQ Replicaset database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- You must have a `StorageClass` that supports volume expansion. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Replicaset](/docs/guides/RabbitMQ/clustering/replicaset.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Volume Expansion Overview](/docs/guides/RabbitMQ/volume-expansion/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> Note: The yaml files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Expand Volume of Replicaset - -Here, we are going to deploy a `RabbitMQ` replicaset using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQOpsRequest` to expand its volume. - -### Prepare RabbitMQ Replicaset Database - -At first verify that your cluster has a storage class, that supports volume expansion. 
Let's check, - -```bash -$ kubectl get storageclass -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -standard (default) kubernetes.io/gce-pd Delete Immediate true 2m49s -``` - -We can see from the output the `standard` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. - -Now, we are going to deploy a `RabbitMQ` replicaSet database with version `4.4.26`. - -### Deploy RabbitMQ - -In this section, we are going to deploy a RabbitMQ Replicaset database with 1GB volume. Then, in the next section we will expand its volume to 2GB using `RabbitMQOpsRequest` CRD. Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-replicaset - namespace: demo -spec: - version: "4.4.26" - replicaSet: - name: "replicaset" - replicas: 3 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mg-replicaset.yaml -RabbitMQ.kubedb.com/mg-replicaset created -``` - -Now, wait until `mg-replicaset` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 10m -``` - -Let's check volume size from statefulset, and from the persistent volume, - -```bash -$ kubectl get sts -n demo mg-replicaset -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-2067c63d-f982-4b66-a008-5e9c3ff6218a 1Gi RWO Delete Bound demo/datadir-mg-replicaset-0 standard 10m -pvc-9db1aeb0-f1af-4555-93a3-0ca754327751 1Gi RWO Delete Bound demo/datadir-mg-replicaset-2 standard 9m45s -pvc-d38f42a8-50d4-4fa9-82ba-69fc7a464ff4 1Gi RWO Delete Bound demo/datadir-mg-replicaset-1 standard 10m -``` - -You can see the statefulset has 1GB storage, and the capacity of all the persistent volumes are also 1GB. - -We are now ready to apply the `RabbitMQOpsRequest` CR to expand the volume of this database. - -### Volume Expansion - -Here, we are going to expand the volume of the replicaset database. - -#### Create RabbitMQOpsRequest - -In order to expand the volume of the database, we have to create a `RabbitMQOpsRequest` CR with our desired volume size. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-volume-exp-replicaset - namespace: demo -spec: - type: VolumeExpansion - databaseRef: - name: mg-replicaset - volumeExpansion: - replicaSet: 2Gi - mode: Online -``` - -Here, - -- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `mops-volume-exp-replicaset` database. -- `spec.type` specifies that we are performing `VolumeExpansion` on our database. -- `spec.volumeExpansion.replicaSet` specifies the desired volume size. 
- -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mops-volume-exp-replicaset.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-volume-exp-replicaset created -``` - -#### Verify RabbitMQ replicaset volume expanded successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `RabbitMQ` object and related `StatefulSets` and `Persistent Volumes`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-volume-exp-replicaset VolumeExpansion Successful 83s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-replicaset -Name: mops-volume-exp-replicaset -Namespace: demo -Labels: -Annotations: API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2020-08-25T18:21:18Z - Finalizers: - kubedb.com - Generation: 1 - Resource Version: 84084 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-volume-exp-replicaset - UID: 2cec0cd3-4abe-4114-813c-1326f28563cb -Spec: - Database Ref: - Name: mg-replicaset - Type: VolumeExpansion - Volume Expansion: - ReplicaSet: 2Gi -Status: - Conditions: - Last Transition Time: 2020-08-25T18:21:18Z - Message: RabbitMQ ops request is being processed - Observed Generation: 1 - Reason: Scaling - Status: True - Type: Scaling - Last Transition Time: 2020-08-25T18:22:38Z - Message: Successfully updated Storage - Observed Generation: 1 - Reason: VolumeExpansion - Status: True - Type: VolumeExpansion - Last Transition Time: 2020-08-25T18:22:38Z - Message: Successfully Resumed RabbitMQ: mg-replicaset - Observed Generation: 1 - Reason: ResumeDatabase - Status: True - Type: ResumeDatabase - Last Transition Time: 2020-08-25T18:22:38Z - Message: Successfully completed the modification process - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal VolumeExpansion 3m11s KubeDB Ops-manager operator Successfully Updated Storage - Normal ResumeDatabase 3m11s KubeDB Ops-manager operator Resuming RabbitMQ - Normal ResumeDatabase 3m11s KubeDB Ops-manager operator Successfully Resumed RabbitMQ - Normal Successful 3m11s KubeDB Ops-manager operator Successfully Scaled Database -``` - -Now, we are going to verify from the `Statefulset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, - -```bash -$ kubectl get sts -n demo mg-replicaset -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"2Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-2067c63d-f982-4b66-a008-5e9c3ff6218a 2Gi RWO Delete Bound demo/datadir-mg-replicaset-0 standard 19m -pvc-9db1aeb0-f1af-4555-93a3-0ca754327751 2Gi RWO Delete Bound demo/datadir-mg-replicaset-2 standard 18m -pvc-d38f42a8-50d4-4fa9-82ba-69fc7a464ff4 2Gi RWO Delete Bound demo/datadir-mg-replicaset-1 standard 19m -``` - -The above 
output verifies that we have successfully expanded the volume of the RabbitMQ database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete RabbitMQopsrequest -n demo mops-volume-exp-replicaset -``` \ No newline at end of file diff --git a/docs/guides/rabbitmq/volume-expansion/sharding.md b/docs/guides/rabbitmq/volume-expansion/sharding.md deleted file mode 100644 index 934c6f6c28..0000000000 --- a/docs/guides/rabbitmq/volume-expansion/sharding.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: RabbitMQ Sharded Database Volume Expansion -menu: - docs_{{ .version }}: - identifier: mg-volume-expansion-shard - name: Sharding - parent: mg-volume-expansion - weight: 40 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# RabbitMQ Sharded Database Volume Expansion - -This guide will show you how to use `KubeDB` Ops-manager operator to expand the volume of a RabbitMQ Sharded Database. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- You must have a `StorageClass` that supports volume expansion. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [Sharding](/docs/guides/RabbitMQ/clustering/sharding.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Volume Expansion Overview](/docs/guides/RabbitMQ/volume-expansion/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> Note: The yaml files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Expand Volume of Sharded Database - -Here, we are going to deploy a `RabbitMQ` Sharded Database using a supported version by `KubeDB` operator. Then we are going to apply `RabbitMQOpsRequest` to expand the volume of shard nodes and config servers. - -### Prepare RabbitMQ Sharded Database - -At first verify that your cluster has a storage class, that supports volume expansion. Let's check, - -```bash -$ kubectl get storageclass -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -standard (default) kubernetes.io/gce-pd Delete Immediate true 2m49s -``` - -We can see from the output the `standard` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. - -Now, we are going to deploy a `RabbitMQ` standalone database with version `4.4.26`. - -### Deploy RabbitMQ - -In this section, we are going to deploy a RabbitMQ Sharded database with 1GB volume for each of the shard nodes and config servers. Then, in the next sections we will expand the volume of shard nodes and config servers to 2GB using `RabbitMQOpsRequest` CRD. 
Below is the YAML of the `RabbitMQ` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1alpha2 -kind: RabbitMQ -metadata: - name: mg-sharding - namespace: demo -spec: - version: 4.4.26 - shardTopology: - configServer: - replicas: 2 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard - mongos: - replicas: 2 - shard: - replicas: 2 - shards: 3 - storage: - resources: - requests: - storage: 1Gi - storageClassName: standard -``` - -Let's create the `RabbitMQ` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mg-shard.yaml -RabbitMQ.kubedb.com/mg-sharding created -``` - -Now, wait until `mg-sharding` has status `Ready`. i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-sharding 4.4.26 Ready 2m45s -``` - -Let's check volume size from statefulset, and from the persistent volume of shards and config servers, - -```bash -$ kubectl get sts -n demo mg-sharding-configsvr -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get sts -n demo mg-sharding-shard0 -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"1Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-194f6e9c-b9a7-4d00-a125-a6c01273468c 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard0-0 standard 68s -pvc-390b6343-f97e-4761-a516-e3c9607c55d6 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard1-1 standard 2m26s -pvc-51ab98e8-d468-4a74-b176-3853dada41c2 1Gi RWO Delete Bound demo/datadir-mg-sharding-configsvr-1 standard 2m33s -pvc-5209095e-561f-4601-a0bf-0c705234da5b 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard1-0 standard 3m6s -pvc-5be2ab13-e12c-4053-8680-7c5588dff8eb 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard2-1 standard 2m32s -pvc-7e11502d-13e0-4a84-9ebe-29bc2b15f026 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard0-1 standard 44s -pvc-7e20906c-462d-47b7-b4cf-ba0ef69ba26e 1Gi RWO Delete Bound demo/datadir-mg-sharding-shard2-0 standard 3m7s -pvc-87634059-0f95-4595-ae8a-121944961103 1Gi RWO Delete Bound demo/datadir-mg-sharding-configsvr-0 standard 3m7s -``` - -You can see the statefulsets have 1GB storage, and the capacity of all the persistent volumes are also 1GB. - -We are now ready to apply the `RabbitMQOpsRequest` CR to expand the volume of this database. - -### Volume Expansion of Shard and ConfigServer Nodes - -Here, we are going to expand the volume of the shard and configServer nodes of the database. - -#### Create RabbitMQOpsRequest - -In order to expand the volume of the shard nodes of the database, we have to create a `RabbitMQOpsRequest` CR with our desired volume size. Below is the YAML of the `RabbitMQOpsRequest` CR that we are going to create, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: RabbitMQOpsRequest -metadata: - name: mops-volume-exp-shard - namespace: demo -spec: - type: VolumeExpansion - databaseRef: - name: mg-sharding - volumeExpansion: - mode: "Online" - shard: 2Gi - configServer: 2Gi -``` - -Here, -- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `mops-volume-exp-shard` database. -- `spec.type` specifies that we are performing `VolumeExpansion` on our database. -- `spec.volumeExpansion.shard` specifies the desired volume size of shard nodes. -- `spec.volumeExpansion.configServer` specifies the desired volume size of configServer nodes. 
- -> **Note:** If you don't want to expand the volume of all the components together, you can only specify the components (shard and configServer) that you want to expand. - -Let's create the `RabbitMQOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mops-volume-exp-shard.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-volume-exp-shard created -``` - -#### Verify RabbitMQ shard volumes expanded successfully - -If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `RabbitMQ` object and related `StatefulSets` and `Persistent Volumes`. - -Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, - -```bash -$ kubectl get RabbitMQopsrequest -n demo -NAME TYPE STATUS AGE -mops-volume-exp-shard VolumeExpansion Successful 3m49s -``` - -We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. - -```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-shard -Name: mops-volume-exp-shard -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: RabbitMQOpsRequest -Metadata: - Creation Timestamp: 2020-09-30T04:24:37Z - Generation: 1 - Resource Version: 140791 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-volume-exp-shard - UID: fc23a0a2-3a48-4b76-95c5-121f3d56df78 -Spec: - Database Ref: - Name: mg-sharding - Type: VolumeExpansion - Volume Expansion: - Config Server: 2Gi - Shard: 2Gi -Status: - Conditions: - Last Transition Time: 2020-09-30T04:25:48Z - Message: RabbitMQ ops request is expanding volume of database - Observed Generation: 1 - Reason: VolumeExpansion - Status: True - Type: VolumeExpansion - Last Transition Time: 2020-09-30T04:26:58Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: ConfigServerVolumeExpansion - Status: True - Type: ConfigServerVolumeExpansion - Last Transition Time: 2020-09-30T04:29:28Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: ShardVolumeExpansion - Status: True - Type: ShardVolumeExpansion - Last Transition Time: 2020-09-30T04:29:33Z - Message: Successfully Resumed RabbitMQ: mg-sharding - Observed Generation: 1 - Reason: ResumeDatabase - Status: True - Type: ResumeDatabase - Last Transition Time: 2020-09-30T04:29:33Z - Message: Successfully Expanded Volume - Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ConfigServerVolumeExpansion 3m25s KubeDB Ops-manager operator Successfully Expanded Volume - Normal ShardVolumeExpansion 55s KubeDB Ops-manager operator Successfully Expanded Volume - Normal ResumeDatabase 50s KubeDB Ops-manager operator Resuming RabbitMQ - Normal ResumeDatabase 50s KubeDB Ops-manager operator Successfully Resumed RabbitMQ - Normal Successful 50s KubeDB Ops-manager operator Successfully Expanded Volume -``` - -Now, we are going to verify from the `Statefulset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, - -```bash -$ kubectl get sts -n demo mg-sharding-configsvr -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' 
-"2Gi" - -$ kubectl get sts -n demo mg-sharding-shard0 -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"2Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-194f6e9c-b9a7-4d00-a125-a6c01273468c 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard0-0 standard 3m38s -pvc-390b6343-f97e-4761-a516-e3c9607c55d6 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard1-1 standard 4m56s -pvc-51ab98e8-d468-4a74-b176-3853dada41c2 2Gi RWO Delete Bound demo/datadir-mg-sharding-configsvr-1 standard 5m3s -pvc-5209095e-561f-4601-a0bf-0c705234da5b 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard1-0 standard 5m36s -pvc-5be2ab13-e12c-4053-8680-7c5588dff8eb 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard2-1 standard 5m2s -pvc-7e11502d-13e0-4a84-9ebe-29bc2b15f026 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard0-1 standard 3m14s -pvc-7e20906c-462d-47b7-b4cf-ba0ef69ba26e 2Gi RWO Delete Bound demo/datadir-mg-sharding-shard2-0 standard 5m37s -pvc-87634059-0f95-4595-ae8a-121944961103 2Gi RWO Delete Bound demo/datadir-mg-sharding-configsvr-0 standard 5m37s -``` - -The above output verifies that we have successfully expanded the volume of the shard nodes and configServer nodes of the RabbitMQ database. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-sharding -kubectl delete RabbitMQopsrequest -n demo mops-volume-exp-shard mops-volume-exp-configserver -``` diff --git a/docs/guides/rabbitmq/volume-expansion/standalone.md b/docs/guides/rabbitmq/volume-expansion/volume-expansion.md similarity index 81% rename from docs/guides/rabbitmq/volume-expansion/standalone.md rename to docs/guides/rabbitmq/volume-expansion/volume-expansion.md index 44e0be640d..cb2b588bd8 100644 --- a/docs/guides/rabbitmq/volume-expansion/standalone.md +++ b/docs/guides/rabbitmq/volume-expansion/volume-expansion.md @@ -1,10 +1,10 @@ --- -title: RabbitMQ Standalone Volume Expansion +title: RabbitMQ Volume Expansion menu: docs_{{ .version }}: - identifier: mg-volume-expansion-standalone + identifier: rm-volume-expansion-describe name: Standalone - parent: mg-volume-expansion + parent: rm-volume-expansion weight: 20 menu_name: docs_{{ .version }} section_menu_id: guides @@ -25,9 +25,9 @@ This guide will show you how to use `KubeDB` Ops-manager operator to expand the - Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - You should be familiar with the following `KubeDB` concepts: - - [RabbitMQ](/docs/guides/RabbitMQ/concepts/RabbitMQ.md) - - [RabbitMQOpsRequest](/docs/guides/RabbitMQ/concepts/opsrequest.md) - - [Volume Expansion Overview](/docs/guides/RabbitMQ/volume-expansion/overview.md) + - [RabbitMQ](/docs/guides/rabbitmq/concepts/rabbitmq.md) + - [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) + - [Volume Expansion Overview](/docs/guides/rabbitmq/volume-expansion/overview.md) To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. @@ -36,7 +36,7 @@ $ kubectl create ns demo namespace/demo created ``` -> Note: The yaml files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/RabbitMQ) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). 
+> Note: The yaml files used in this tutorial are stored in [docs/examples/RabbitMQ](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/rabbitmq) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Expand Volume of Standalone Database @@ -54,7 +54,7 @@ standard (default) kubernetes.io/gce-pd Delete Immediate We can see from the output the `standard` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. -Now, we are going to deploy a `RabbitMQ` standalone database with version `4.4.26`. +Now, we are going to deploy a `RabbitMQ` standalone database with version `3.13.2`. #### Deploy RabbitMQ standalone @@ -64,10 +64,10 @@ In this section, we are going to deploy a RabbitMQ standalone database with 1GB apiVersion: kubedb.com/v1alpha2 kind: RabbitMQ metadata: - name: mg-standalone + name: rm-standalone namespace: demo spec: - version: "4.4.26" + version: "3.13.2" storageType: Durable storage: storageClassName: "standard" @@ -81,30 +81,30 @@ spec: Let's create the `RabbitMQ` CR we have shown above, ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mg-standalone.yaml -RabbitMQ.kubedb.com/mg-standalone created +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/rm-standalone.yaml +RabbitMQ.kubedb.com/rm-standalone created ``` -Now, wait until `mg-standalone` has status `Ready`. i.e, +Now, wait until `rm-standalone` has status `Ready`. i.e, ```bash $ kubectl get mg -n demo NAME VERSION STATUS AGE -mg-standalone 4.4.26 Ready 2m53s +rm-standalone 3.13.2 Ready 2m53s ``` -Let's check volume size from statefulset, and from the persistent volume, +Let's check volume size from PetSet, and from the persistent volume, ```bash -$ kubectl get sts -n demo mg-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +$ kubectl get petset -n demo rm-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' "1Gi" $ kubectl get pv -n demo NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-d0b07657-a012-4384-862a-b4e437774287 1Gi RWO Delete Bound demo/datadir-mg-standalone-0 standard 49s +pvc-d0b07657-a012-4384-862a-b4e437774287 1Gi RWO Delete Bound demo/datadir-rm-standalone-0 standard 49s ``` -You can see the statefulset has 1GB storage, and the capacity of the persistent volume is also 1GB. +You can see the PetSet has 1GB storage, and the capacity of the persistent volume is also 1GB. We are now ready to apply the `RabbitMQOpsRequest` CR to expand the volume of this database. 
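Before creating the ops request, it can be handy to record the current capacity straight from the PersistentVolumeClaim so the change is easy to confirm later. The claim name below is taken from the PV listing above; adjust it if yours differs.

```bash
# Claim name follows the <volume>-<pod> pattern shown in the PV listing above
kubectl get pvc -n demo datadir-rm-standalone-0 \
  -o=jsonpath='{.status.capacity.storage}{"\n"}'
```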
@@ -120,14 +120,14 @@ In order to expand the volume of the database, we have to create a `RabbitMQOpsR apiVersion: ops.kubedb.com/v1alpha1 kind: RabbitMQOpsRequest metadata: - name: mops-volume-exp-standalone + name: rmops-volume-exp-standalone namespace: demo spec: type: VolumeExpansion databaseRef: - name: mg-standalone + name: rm-standalone volumeExpansion: - standalone: 2Gi + node: 2Gi mode: Online ``` @@ -143,8 +143,8 @@ During `Online` VolumeExpansion KubeDB expands volume without pausing database o Let's create the `RabbitMQOpsRequest` CR we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/RabbitMQ/volume-expansion/mops-volume-exp-standalone.yaml -RabbitMQopsrequest.ops.kubedb.com/mops-volume-exp-standalone created +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/rabbitmq/volume-expansion/rmops-volume-exp-standalone.yaml +rabbitmqopsrequest.ops.kubedb.com/rmops-volume-exp-standalone created ``` #### Verify RabbitMQ Standalone volume expanded successfully @@ -154,16 +154,16 @@ If everything goes well, `KubeDB` Ops-manager operator will update the volume si Let's wait for `RabbitMQOpsRequest` to be `Successful`. Run the following command to watch `RabbitMQOpsRequest` CR, ```bash -$ kubectl get RabbitMQopsrequest -n demo +$ kubectl get rabbitmqopsrequest -n demo NAME TYPE STATUS AGE -mops-volume-exp-standalone VolumeExpansion Successful 75s +rmops-volume-exp-standalone VolumeExpansion Successful 75s ``` We can see from the above output that the `RabbitMQOpsRequest` has succeeded. If we describe the `RabbitMQOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. ```bash -$ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-standalone - Name: mops-volume-exp-standalone +$ kubectl describe rabbitmqopsrequest -n demo rmops-volume-exp-standalone + Name: rmops-volume-exp-standalone Namespace: demo Labels: Annotations: API Version: ops.kubedb.com/v1alpha1 @@ -174,11 +174,11 @@ $ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-standalone kubedb.com Generation: 1 Resource Version: 72899 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/RabbitMQopsrequests/mops-volume-exp-standalone + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/rabbitmqopsrequest/rmops-volume-exp-standalone UID: 007fe35a-25f6-45e7-9e85-9add488b2622 Spec: Database Ref: - Name: mg-standalone + Name: rm-standalone Type: VolumeExpansion Volume Expansion: Standalone: 2Gi @@ -197,7 +197,7 @@ $ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-standalone Status: True Type: VolumeExpansion Last Transition Time: 2020-08-25T17:50:03Z - Message: Successfully Resumed RabbitMQ: mg-standalone + Message: Successfully Resumed RabbitMQ: rm-standalone Observed Generation: 1 Reason: ResumeDatabase Status: True @@ -222,7 +222,7 @@ $ kubectl describe RabbitMQopsrequest -n demo mops-volume-exp-standalone Now, we are going to verify from the `Statefulset`, and the `Persistent Volume` whether the volume of the standalone database has expanded to meet the desired state, Let's check, ```bash -$ kubectl get sts -n demo mg-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +$ kubectl get petset -n demo rm-standalone -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' "2Gi" $ kubectl get pv -n demo @@ -237,6 +237,6 @@ The above output verifies that we have successfully 
expanded the volume of the R To clean up the Kubernetes resources created by this tutorial, run: ```bash -kubectl delete mg -n demo mg-standalone -kubectl delete RabbitMQopsrequest -n demo mops-volume-exp-standalone +kubectl delete rm -n demo rm-standalone +kubectl delete rabbitmqopsrequest -n demo rmops-volume-exp-standalone ```
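If your storage class cannot expand volumes while they are mounted, the same request can be attempted in `Offline` mode instead of `Online`. The following is only a sketch, assuming offline expansion is supported by your KubeDB version and storage provisioner; it typically requires restarting pods, so consult the [RabbitMQOpsRequest](/docs/guides/rabbitmq/concepts/opsrequest.md) concept page before using it.

```yaml
apiVersion: ops.kubedb.com/v1alpha1
kind: RabbitMQOpsRequest
metadata:
  # Hypothetical name for an offline expansion request
  name: rmops-volume-exp-offline
  namespace: demo
spec:
  type: VolumeExpansion
  databaseRef:
    name: rm-standalone
  volumeExpansion:
    node: 3Gi
    mode: Offline
```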