From 2c126dc6c89e0ea177dea685e9e0dc90924de9a2 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 23 Aug 2024 15:34:45 +0200 Subject: [PATCH 01/12] [crdb] Upgrade CockroachDB to 24.1.3 --- Makefile | 2 +- build/dev/docker-compose_dss.yaml | 2 +- cmds/core-service/README.md | 2 +- .../modules/terraform-aws-dss/terraform.dev.example.tfvars | 2 +- .../modules/terraform-google-dss/terraform.dev.example.tfvars | 2 +- deploy/operations/Dockerfile | 2 +- deploy/operations/ci/aws-1/terraform.tfvars | 2 +- deploy/services/helm-charts/dss/values.example.yaml | 2 +- test/migrations/clear_db.sh | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 0d3eaae87..1f904def6 100644 --- a/Makefile +++ b/Makefile @@ -110,7 +110,7 @@ test-go-units: .PHONY: test-go-units-crdb test-go-units-crdb: cleanup-test-go-units-crdb - @docker run -d --name dss-crdb-for-testing -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v21.2.7 start-single-node --listen-addr=0.0.0.0 --insecure > /dev/null + @docker run -d --name dss-crdb-for-testing -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v24.1.3 start-single-node --listen-addr=0.0.0.0 --insecure > /dev/null @until [ -n "`docker logs dss-crdb-for-testing | grep 'nodeID'`" ]; do echo "Waiting for CRDB to be ready"; sleep 3; done; go run ./cmds/db-manager/main.go --schemas_dir ./build/db_schemas/rid --db_version latest --cockroach_host localhost go test -count=1 -v ./pkg/rid/store/cockroach --cockroach_host localhost --cockroach_port 26257 --cockroach_ssl_mode disable --cockroach_user root --cockroach_db_name rid diff --git a/build/dev/docker-compose_dss.yaml b/build/dev/docker-compose_dss.yaml index e84e4d078..3ce9dad1f 100644 --- a/build/dev/docker-compose_dss.yaml +++ b/build/dev/docker-compose_dss.yaml @@ -8,7 +8,7 @@ version: '3.8' services: local-dss-crdb: - image: cockroachdb/cockroach:v21.2.7 + image: cockroachdb/cockroach:v24.1.3 command: start-single-node --insecure ports: - "26257:26257" diff --git a/cmds/core-service/README.md b/cmds/core-service/README.md index 40e2573c4..bf58c629e 100644 --- a/cmds/core-service/README.md +++ b/cmds/core-service/README.md @@ -32,7 +32,7 @@ go run ./cmds/core-service \ To run correctly, core-service must be able to [access](../../pkg/cockroach/flags/flags.go) a CockroachDB cluster. Provision of this cluster is handled automatically for a local development environment if following [the instructions for a standalone instance](../../build/dev/standalone_instance.md). 
Or, a CockroachDB instance can be created manually with: ```bash -docker container run -p 26257:26257 -p 8080:8080 --rm cockroachdb/cockroach:v21.2.7 start-single-node --insecure +docker container run -p 26257:26257 -p 8080:8080 --rm cockroachdb/cockroach:v24.1.3 start-single-node --insecure ``` #### Database configuration diff --git a/deploy/infrastructure/modules/terraform-aws-dss/terraform.dev.example.tfvars b/deploy/infrastructure/modules/terraform-aws-dss/terraform.dev.example.tfvars index d06e9bff3..d9cc764a3 100644 --- a/deploy/infrastructure/modules/terraform-aws-dss/terraform.dev.example.tfvars +++ b/deploy/infrastructure/modules/terraform-aws-dss/terraform.dev.example.tfvars @@ -26,7 +26,7 @@ authorization = { should_init = true # CockroachDB -crdb_image_tag = "v21.2.7" +crdb_image_tag = "v24.1.3" crdb_cluster_name = "interuss_example" crdb_locality = "interuss_dss-aws-ew1" crdb_external_nodes = [] diff --git a/deploy/infrastructure/modules/terraform-google-dss/terraform.dev.example.tfvars b/deploy/infrastructure/modules/terraform-google-dss/terraform.dev.example.tfvars index 9f192c0c6..70e21eff2 100644 --- a/deploy/infrastructure/modules/terraform-google-dss/terraform.dev.example.tfvars +++ b/deploy/infrastructure/modules/terraform-google-dss/terraform.dev.example.tfvars @@ -27,7 +27,7 @@ authorization = { should_init = true # CockroachDB -crdb_image_tag = "v21.2.7" +crdb_image_tag = "v24.1.3" crdb_cluster_name = "interuss_example" crdb_locality = "interuss_dss-dev-w6a" crdb_external_nodes = [] diff --git a/deploy/operations/Dockerfile b/deploy/operations/Dockerfile index 3d7eb0e2f..2e969f7ff 100644 --- a/deploy/operations/Dockerfile +++ b/deploy/operations/Dockerfile @@ -1,6 +1,6 @@ FROM ubuntu:22.04 -ENV COCKROACH_VERSION 21.2.7 +ENV COCKROACH_VERSION 24.1.3 RUN apt-get update \ && apt-get install -y unzip curl gnupg lsb-release apt-transport-https ca-certificates diff --git a/deploy/operations/ci/aws-1/terraform.tfvars b/deploy/operations/ci/aws-1/terraform.tfvars index c9c42c775..d4e007e02 100644 --- a/deploy/operations/ci/aws-1/terraform.tfvars +++ b/deploy/operations/ci/aws-1/terraform.tfvars @@ -23,7 +23,7 @@ authorization = { public_key_pem_path = "/test-certs/auth2.pem" } should_init = true -crdb_image_tag = "v21.2.7" +crdb_image_tag = "v24.1.3" crdb_cluster_name = "interuss-ci" crdb_locality = "interuss_dss-ci-aws-ue1" crdb_external_nodes = [] diff --git a/deploy/services/helm-charts/dss/values.example.yaml b/deploy/services/helm-charts/dss/values.example.yaml index 1f3a25de2..54c5a7417 100644 --- a/deploy/services/helm-charts/dss/values.example.yaml +++ b/deploy/services/helm-charts/dss/values.example.yaml @@ -13,7 +13,7 @@ dss: cockroachdb: # See https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml image: - tag: v21.2.7 + tag: v24.1.3 fullnameOverride: dss-cockroachdb conf: join: [] diff --git a/test/migrations/clear_db.sh b/test/migrations/clear_db.sh index 15d5194f9..abde3240d 100755 --- a/test/migrations/clear_db.sh +++ b/test/migrations/clear_db.sh @@ -8,5 +8,5 @@ echo "Starting CRDB container" docker run -d --rm --name dss-crdb-for-migration-testing \ -p 26257:26257 \ -p 8080:8080 \ - cockroachdb/cockroach:v21.2.7 start-single-node \ + cockroachdb/cockroach:v24.1.3 start-single-node \ --insecure > /dev/null From 776cc2c0303480a9b0e766b7297f831a8f6bd813 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Wed, 28 Aug 2024 00:09:15 +0200 Subject: [PATCH 02/12] Update start-single-node argument --- Makefile | 2 +- 1 file changed, 1
insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1f904def6..6359b31fd 100644 --- a/Makefile +++ b/Makefile @@ -110,7 +110,7 @@ test-go-units: .PHONY: test-go-units-crdb test-go-units-crdb: cleanup-test-go-units-crdb - @docker run -d --name dss-crdb-for-testing -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v24.1.3 start-single-node --listen-addr=0.0.0.0 --insecure > /dev/null + @docker run -d --name dss-crdb-for-testing -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v24.1.3 start-single-node --insecure > /dev/null @until [ -n "`docker logs dss-crdb-for-testing | grep 'nodeID'`" ]; do echo "Waiting for CRDB to be ready"; sleep 3; done; go run ./cmds/db-manager/main.go --schemas_dir ./build/db_schemas/rid --db_version latest --cockroach_host localhost go test -count=1 -v ./pkg/rid/store/cockroach --cockroach_host localhost --cockroach_port 26257 --cockroach_ssl_mode disable --cockroach_user root --cockroach_db_name rid From f57e50aa34bb943f90c87ea650db169bf5a15a00 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Thu, 29 Aug 2024 23:11:26 +0200 Subject: [PATCH 03/12] Add CRDB upgrade/migration instructions --- deploy/MIGRATION.md | 125 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 113 insertions(+), 12 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 72c3ee1cb..d3d088d0d 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -1,19 +1,120 @@ -# Kubernetes version migration +# CockroachDB and Kubernetes version migration -This page provides information on how to upgrade your Kubernetes cluster deployed using the +This page provides information on how to upgrade your CockroachDB and Kubernetes cluster deployed using the tools from this repository. +## CockroachDB upgrades + +Cockroach DB must be upgraded on all DSS instances of the pool at the same time. Therefore, all DSS instances +connected to a pool must coordinate the upgrade. The rollout of the upgrades on the whole CRDB cluster +must be carefully performed in a sequence in order to keep the majority of nodes healthy during that period +and prevent downtime. +For a Pooled deployment, one of the DSS Instance must take the role of the upgrade "Leader" and coordinate the +upgrade with other "Followers" DSS instances. +In general a Cockroach DB upgrade consists of: +1. Upgrade preparation: Verify that the cluster is in a nominal state ready for upgrade. +1. Decide how the upgrade will be finalized (for major upgrades only): Like CockroachDB, we recommend disabling auto-finalization. +1. Perform the rolling upgrade: This step should be performed first by the Leader and as quickly as possible by the Followers **one after the other**. Note that during this period, the performance of the cluster may be impacted since, as documented by CockroachDB, "a query that is sent to an upgraded node can be distributed only among other upgraded nodes. Data accesses that would otherwise be local may become remote, and the performance of these queries can suffer." +1. Roll back the upgrade (optional): Like the rolling upgrade, this step should be carefully coordinated with all DSS instances to guarantee the minimum number of healthy nodes to keep the cluster available. +1. Finish the upgrade: This step should be accomplished by the Leader. + +The following sections provide links to the CockroachDB migration documentation. + +**Important notes:** + +- Further work is required to test and evaluate the availability of the DSS during migrations. 
+- We recommend to review carefully the instructions provided by CockroachDB and to rehearse all migrations on a test + environment before applying them to production. + +### Terraform deployment notes + +If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment-notes) or [Tanka](MIGRATION.md#tanka-deployment-notes). +Then, update the variable `crdb_image_tag` in your `terraform.tfvars` to align your configuration with the new state of +the cluster and avoid a conflict on the next terraform update. + +### Helm deployment notes + +If you deployed the DSS using the Helm chart and the instructions provided in this repository, the values edited using `helm upgrade ... --set` +commands should preferably be updated in your deployment `values.yaml`. +In order to edit keys of the `cockroachdb/cockroachdb` Helm chart, you need to use the root key `cockroachdb` since it is +a dependency of the dss chart. +For instance, setting the image tag and partition using the command line would look like this: +``` +helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.image.tag=v24.1.3 --reuse-values +``` +``` +helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.statefulset.updateStrategy.rollingUpdate.partition=0 --reuse-values +``` +and if editing the image tag and rollout partition in your `values.yaml`, it would look like this: +```yaml +cockroachdb: + image: + # ... + tag: v24.1.3 + statefulset: + updateStrategy: + rollingUpdate: + partition: 0 +``` +New values can then be applied using `helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] -f [values.yaml]` + +#### 21.2.7 to 24.1.3 + +CockroachDB requires to upgrade one minor version at a time: + +1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=helm). +2. 22.1 to 22.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=helm). +3. 22.2 to 23.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=helm). +4. 23.1 to 23.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=helm). +5. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=helm). + +### Tanka deployment notes + +For deployments using Tanka configuration, following the `Cluster Upgrade with Manual configs` approach is recommended. (See specific links below) +To apply the changes to your cluster, you have two options: +1. Follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configuration, especially the new image version +(see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. +1. It is also possible to use Tanka to propagate the changes instead of editing resources directly using kubectl as documented in the CockroachDB documentation: + 1. Patching the image version can be achieved by updating the value of your configuration metadata key [`cockroach.image`](https://github.com/interuss/dss/blob/master/build/deploy/examples/minimum/main.jsonnet#L13) (see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)). + + 1. 
Patching the rolling update partition `{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}` can be achieved by adding + the metadata key `cockroach.partition`. + ``` + local metadata = metadataBase { + #... + cockroach+: { + image: 'VAR_CRDB_DOCKER_IMAGE_NAME', + #... + partition: 0 + } + #... + } + ``` + +#### 21.2.7 to 24.1.3 + +CockroachDB requires to upgrade one minor version at a time. As mentioned above, no instruction is provided for tanka specifically, +therefore we recommend to follow the manual steps: + +1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=manual). +2. 22.1 to 22.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=manual). +3. 22.2 to 23.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=manual). +4. 23.1 to 23.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=manual). +5. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=manual). + +## Kubernetes upgrades + **Important notes:** - The migration plan below has been tested with the deployment of services using [Helm](services/helm-charts) and [Tanka](../build/deploy) without Istio enabled. Note that this configuration flag has been decommissioned since [#995](https://github.com/interuss/dss/pull/995). - Further work is required to test and evaluate the availability of the DSS during migrations. - It is highly recommended to rehearse such operation on a test cluster before applying them to a production environment. -## Google - Google Kubernetes Engine +### Google - Google Kubernetes Engine Migrations of GKE clusters are managed using terraform. -### 1.27 to 1.28 +#### 1.27 to 1.28 1. Change your `terraform.tfvars` to use `1.28` by adding or updating the `kubernetes_version` variable: ```terraform @@ -22,7 +123,7 @@ Migrations of GKE clusters are managed using terraform. 2. Run `terraform apply`. This operation may take more than 30min. 3. Monitor the upgrade of the nodes in the Google Cloud console. -### 1.26 to 1.27 +#### 1.26 to 1.27 1. Change your `terraform.tfvars` to use `1.27` by adding or updating the `kubernetes_version` variable: ```terraform @@ -31,7 +132,7 @@ Migrations of GKE clusters are managed using terraform. 2. Run `terraform apply`. This operation may take more than 30min. 3. Monitor the upgrade of the nodes in the Google Cloud console. -### 1.25 to 1.26 +#### 1.25 to 1.26 1. Change your `terraform.tfvars` to use `1.26` by adding or updating the `kubernetes_version` variable: ```terraform @@ -40,7 +141,7 @@ Migrations of GKE clusters are managed using terraform. 2. Run `terraform apply` 3. Monitor the upgrade of the nodes in the Google Cloud console. -### 1.24 to 1.25 +#### 1.24 to 1.25 1. Change your `terraform.tfvars` to use `1.25` by adding or updating the `kubernetes_version` variable: ```terraform @@ -49,7 +150,7 @@ Migrations of GKE clusters are managed using terraform. 2. Run `terraform apply`. This operation may take more than 30min. 3. Monitor the upgrade of the nodes in the Google Cloud console. 
-## AWS - Elastic Kubernetes Service +### AWS - Elastic Kubernetes Service Currently, upgrades of EKS can't be achieved reliably with terraform directly. The recommended workaround is to use the web console of AWS Elastic Kubernetes Service (EKS) to upgrade the cluster. @@ -57,7 +158,7 @@ Before proceeding, always check on the cluster page the *Upgrade Insights* tab w availability of Kubernetes resources in each version. The following sections omit this check if no resource is expected to be reported in the context of a standard deployment performed with the tools in this repository. -### 1.27 to 1.28 +#### 1.27 to 1.28 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. 2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. @@ -66,7 +167,7 @@ expected to be reported in the context of a standard deployment performed with t kubernetes_version = 1.28 ``` -### 1.26 to 1.27 +#### 1.26 to 1.27 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. 2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. @@ -75,7 +176,7 @@ expected to be reported in the context of a standard deployment performed with t kubernetes_version = 1.27 ``` -### 1.25 to 1.26 +#### 1.25 to 1.26 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. 2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. @@ -84,7 +185,7 @@ expected to be reported in the context of a standard deployment performed with t kubernetes_version = 1.26 ``` -### 1.24 to 1.25 +#### 1.24 to 1.25 1. Check for deprecated resources: - Click on the Upgrade Insights tab to see deprecation warnings on the cluster page. From 477d975572aaa47efa87fed64deedaf6cca60754 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Thu, 29 Aug 2024 23:59:11 +0200 Subject: [PATCH 04/12] Only keep manual approach for tanka upgrade --- deploy/MIGRATION.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index d3d088d0d..bcab2996e 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -50,7 +50,7 @@ and if editing the image tag and rollout partition in your `values.yaml`, it wou cockroachdb: image: # ... - tag: v24.1.3 + tag: # version statefulset: updateStrategy: rollingUpdate: @@ -74,22 +74,6 @@ For deployments using Tanka configuration, following the `Cluster Upgrade with M To apply the changes to your cluster, you have two options: 1. Follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configuration, especially the new image version (see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. -1. It is also possible to use Tanka to propagate the changes instead of editing resources directly using kubectl as documented in the CockroachDB documentation: - 1. Patching the image version can be achieved by updating the value of your configuration metadata key [`cockroach.image`](https://github.com/interuss/dss/blob/master/build/deploy/examples/minimum/main.jsonnet#L13) (see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)). - - 1. 
Patching the rolling update partition `{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}` can be achieved by adding - the metadata key `cockroach.partition`. - ``` - local metadata = metadataBase { - #... - cockroach+: { - image: 'VAR_CRDB_DOCKER_IMAGE_NAME', - #... - partition: 0 - } - #... - } - ``` #### 21.2.7 to 24.1.3 From c37de2f6a2cf6b707c2aa8bf6225c48950c32152 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:05:00 +0200 Subject: [PATCH 05/12] Edits --- deploy/MIGRATION.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index bcab2996e..4286400d3 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -34,9 +34,11 @@ the cluster and avoid a conflict on the next terraform update. ### Helm deployment notes -If you deployed the DSS using the Helm chart and the instructions provided in this repository, the values edited using `helm upgrade ... --set` -commands should preferably be updated in your deployment `values.yaml`. -In order to edit keys of the `cockroachdb/cockroachdb` Helm chart, you need to use the root key `cockroachdb` since it is +If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions +provided by CockroachDB `Cluster Upgrade with Helm` (Links below). Note that the CockroachDB documentation suggest +to edit the values using `helm upgrade ... --set` commands. +However, you can alternatively update `helm_values.yaml` in your deployment. +With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb helm chart is a dependency of the dss chart. For instance, setting the image tag and partition using the command line would look like this: ``` From c712b10e7e57482a12a4f613bbc18cf753a7c31d Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:11:03 +0200 Subject: [PATCH 06/12] Edits --- deploy/MIGRATION.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 4286400d3..39290c2a8 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -35,10 +35,9 @@ the cluster and avoid a conflict on the next terraform update. ### Helm deployment notes If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions -provided by CockroachDB `Cluster Upgrade with Helm` (Links below). Note that the CockroachDB documentation suggest -to edit the values using `helm upgrade ... --set` commands. -However, you can alternatively update `helm_values.yaml` in your deployment. -With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb helm chart is +provided by CockroachDB `Cluster Upgrade with Helm` (See specific links below). Note that the CockroachDB documentation +suggest to edit the values using `helm upgrade ... --set` commands. However, you can alternatively update `helm_values.yml` +in your deployment. With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb helm chart is a dependency of the dss chart. 
For instance, setting the image tag and partition using the command line would look like this: ``` From b1ffa7d73816f1ed5bfd032ee389353eb708a7c4 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:12:15 +0200 Subject: [PATCH 07/12] Edits --- deploy/MIGRATION.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 39290c2a8..acc4d5e96 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -26,13 +26,13 @@ The following sections provide links to the CockroachDB migration documentation. - We recommend to review carefully the instructions provided by CockroachDB and to rehearse all migrations on a test environment before applying them to production. -### Terraform deployment notes +### Terraform deployment -If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment-notes) or [Tanka](MIGRATION.md#tanka-deployment-notes). +If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment) or [Tanka](MIGRATION.md#tanka-deployment). Then, update the variable `crdb_image_tag` in your `terraform.tfvars` to align your configuration with the new state of the cluster and avoid a conflict on the next terraform update. -### Helm deployment notes +### Helm deployment If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions provided by CockroachDB `Cluster Upgrade with Helm` (See specific links below). Note that the CockroachDB documentation @@ -69,7 +69,7 @@ CockroachDB requires to upgrade one minor version at a time: 4. 23.1 to 23.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=helm). 5. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=helm). -### Tanka deployment notes +### Tanka deployment For deployments using Tanka configuration, following the `Cluster Upgrade with Manual configs` approach is recommended. (See specific links below) To apply the changes to your cluster, you have two options: From b3cbd2ca0f89f4c2d599fa8518b136451dc124bf Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:14:00 +0200 Subject: [PATCH 08/12] Edits --- deploy/MIGRATION.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index acc4d5e96..3aaa8aa74 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -5,13 +5,13 @@ tools from this repository. ## CockroachDB upgrades -Cockroach DB must be upgraded on all DSS instances of the pool at the same time. Therefore, all DSS instances +CockroachDB must be upgraded on all DSS instances of the pool at the same time. Therefore, all DSS instances connected to a pool must coordinate the upgrade. The rollout of the upgrades on the whole CRDB cluster must be carefully performed in a sequence in order to keep the majority of nodes healthy during that period and prevent downtime. For a Pooled deployment, one of the DSS Instance must take the role of the upgrade "Leader" and coordinate the upgrade with other "Followers" DSS instances. -In general a Cockroach DB upgrade consists of: +In general a CockroachDB upgrade consists of: 1. Upgrade preparation: Verify that the cluster is in a nominal state ready for upgrade. 1. 
Decide how the upgrade will be finalized (for major upgrades only): Like CockroachDB, we recommend disabling auto-finalization. 1. Perform the rolling upgrade: This step should be performed first by the Leader and as quickly as possible by the Followers **one after the other**. Note that during this period, the performance of the cluster may be impacted since, as documented by CockroachDB, "a query that is sent to an upgraded node can be distributed only among other upgraded nodes. Data accesses that would otherwise be local may become remote, and the performance of these queries can suffer." From 087917ab183f5a13b9a4e09875678a5032479301 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:21:01 +0200 Subject: [PATCH 09/12] Edits --- deploy/MIGRATION.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 3aaa8aa74..bac4c4568 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -18,7 +18,7 @@ In general a CockroachDB upgrade consists of: 1. Roll back the upgrade (optional): Like the rolling upgrade, this step should be carefully coordinated with all DSS instances to guarantee the minimum number of healthy nodes to keep the cluster available. 1. Finish the upgrade: This step should be accomplished by the Leader. -The following sections provide links to the CockroachDB migration documentation. +The following sections provide links to the CockroachDB migration documentation depending on your deployment type. **Important notes:** @@ -30,14 +30,14 @@ The following sections provide links to the CockroachDB migration documentation. If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment) or [Tanka](MIGRATION.md#tanka-deployment). Then, update the variable `crdb_image_tag` in your `terraform.tfvars` to align your configuration with the new state of -the cluster and avoid a conflict on the next terraform update. +the cluster. ### Helm deployment If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions provided by CockroachDB `Cluster Upgrade with Helm` (See specific links below). Note that the CockroachDB documentation suggest to edit the values using `helm upgrade ... --set` commands. However, you can alternatively update `helm_values.yml` -in your deployment. With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb helm chart is +in your deployment. With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb Helm chart is a dependency of the dss chart. 
For instance, setting the image tag and partition using the command line would look like this: ``` @@ -46,7 +46,8 @@ helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.image.tag=v24.1 ``` helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.statefulset.updateStrategy.rollingUpdate.partition=0 --reuse-values ``` -and if editing the image tag and rollout partition in your `values.yaml`, it would look like this: + +If using a values file (eg `helm_values.yml`), you can set the new image tag and rollout partition like this: ```yaml cockroachdb: image: @@ -57,7 +58,7 @@ cockroachdb: rollingUpdate: partition: 0 ``` -New values can then be applied using `helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] -f [values.yaml]` +New values can then be applied using `helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] -f [helm_values.yml]` #### 21.2.7 to 24.1.3 @@ -71,15 +72,14 @@ CockroachDB requires to upgrade one minor version at a time: ### Tanka deployment -For deployments using Tanka configuration, following the `Cluster Upgrade with Manual configs` approach is recommended. (See specific links below) -To apply the changes to your cluster, you have two options: -1. Follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configuration, especially the new image version -(see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. +For deployments using Tanka configuration, since no instructions are provided for tanka specifically, +we recommend to follow the manual steps documented by CockroachDB: `Cluster Upgrade with Manual configs`. (See specific links below) +To apply the changes to your cluster, follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configuration, +especially the new image version (see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. #### 21.2.7 to 24.1.3 -CockroachDB requires to upgrade one minor version at a time. As mentioned above, no instruction is provided for tanka specifically, -therefore we recommend to follow the manual steps: +CockroachDB requires to upgrade one minor version at a time. 1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=manual). 2. 22.1 to 22.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=manual). From bb235949bf62ce71376266236913ba5762b91b2b Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:41:31 +0200 Subject: [PATCH 10/12] Edits --- deploy/MIGRATION.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index bac4c4568..732de7085 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -5,9 +5,8 @@ tools from this repository. ## CockroachDB upgrades -CockroachDB must be upgraded on all DSS instances of the pool at the same time. Therefore, all DSS instances -connected to a pool must coordinate the upgrade. The rollout of the upgrades on the whole CRDB cluster -must be carefully performed in a sequence in order to keep the majority of nodes healthy during that period +CockroachDB must be upgraded on all DSS instances of the pool one after the other. 
The rollout of the upgrades on +the whole CRDB cluster must be carefully performed in sequence to keep the majority of nodes healthy during that period and prevent downtime. For a Pooled deployment, one of the DSS Instance must take the role of the upgrade "Leader" and coordinate the upgrade with other "Followers" DSS instances. @@ -18,7 +17,8 @@ In general a CockroachDB upgrade consists of: 1. Roll back the upgrade (optional): Like the rolling upgrade, this step should be carefully coordinated with all DSS instances to guarantee the minimum number of healthy nodes to keep the cluster available. 1. Finish the upgrade: This step should be accomplished by the Leader. -The following sections provide links to the CockroachDB migration documentation depending on your deployment type. +The following sections provide links to the CockroachDB migration documentation depending on your deployment type, which can +be different by DSS instance. **Important notes:** @@ -28,17 +28,16 @@ The following sections provide links to the CockroachDB migration documentation ### Terraform deployment -If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment) or [Tanka](MIGRATION.md#tanka-deployment). -Then, update the variable `crdb_image_tag` in your `terraform.tfvars` to align your configuration with the new state of -the cluster. +If a DSS instance has been deployed with terraform, first upgrade the cluster using [Helm](MIGRATION.md#helm-deployment) +or [Tanka](MIGRATION.md#tanka-deployment). Then, update the variable `crdb_image_tag` in your `terraform.tfvars` to +align your configuration with the new state of the cluster. ### Helm deployment If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions provided by CockroachDB `Cluster Upgrade with Helm` (See specific links below). Note that the CockroachDB documentation -suggest to edit the values using `helm upgrade ... --set` commands. However, you can alternatively update `helm_values.yml` -in your deployment. With both approaches, you will need to use the root key `cockroachdb` since the cockroachdb Helm chart is -a dependency of the dss chart. +suggest to edit the values using `helm upgrade ... --set` commands. You will need to use the root key `cockroachdb` +since the cockroachdb Helm chart is a dependency of the dss chart. For instance, setting the image tag and partition using the command line would look like this: ``` helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.image.tag=v24.1.3 --reuse-values @@ -46,8 +45,7 @@ helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.image.tag=v24.1 ``` helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] --set cockroachdb.statefulset.updateStrategy.rollingUpdate.partition=0 --reuse-values ``` - -If using a values file (eg `helm_values.yml`), you can set the new image tag and rollout partition like this: +Alternatively, you can update `helm_values.yml` in your deployment and set the new image tag and rollout partition like this: ```yaml cockroachdb: image: @@ -58,11 +56,12 @@ cockroachdb: rollingUpdate: partition: 0 ``` -New values can then be applied using `helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] -f [helm_values.yml]` +New values can then be applied using `helm upgrade [RELEASE_NAME] [PATH_TO_DSS_HELM] -f [helm_values.yml]`. +We recommend the second approach to keep your helm values in sync with the cluster state. 
#### 21.2.7 to 24.1.3 -CockroachDB requires to upgrade one minor version at a time: +CockroachDB requires to upgrade one minor version at a time, therefore the following migrations have to be performed: 1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=helm). 2. 22.1 to 22.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=helm). @@ -73,13 +72,14 @@ CockroachDB requires to upgrade one minor version at a time: ### Tanka deployment For deployments using Tanka configuration, since no instructions are provided for tanka specifically, -we recommend to follow the manual steps documented by CockroachDB: `Cluster Upgrade with Manual configs`. (See specific links below) -To apply the changes to your cluster, follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configuration, -especially the new image version (see [`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. +we recommend to follow the manual steps documented by CockroachDB: `Cluster Upgrade with Manual configs`. +(See specific links below) To apply the changes to your cluster, follow the manual steps and reflect the new +values in the *Leader* and *Followers* Tanka configurations, namely the new image version (see +[`VAR_CRDB_DOCKER_IMAGE_NAME`](../build/README.md)) to ensure the new configuration is aligned with the cluster state. #### 21.2.7 to 24.1.3 -CockroachDB requires to upgrade one minor version at a time. +CockroachDB requires to upgrade one minor version at a time, therefore the following migrations have to be performed: 1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=manual). 2. 22.1 to 22.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=manual). From 8f2a9e2874fe127be8c9f24642478c71b4485aa9 Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 00:48:48 +0200 Subject: [PATCH 11/12] Edits --- deploy/MIGRATION.md | 48 ++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 732de7085..09218aa1d 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -64,10 +64,10 @@ We recommend the second approach to keep your helm values in sync with the clust CockroachDB requires to upgrade one minor version at a time, therefore the following migrations have to be performed: 1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=helm). -2. 22.1 to 22.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=helm). -3. 22.2 to 23.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=helm). -4. 23.1 to 23.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=helm). -5. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=helm). +1. 
22.1 to 22.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=helm). +1. 22.2 to 23.1: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=helm). +1. 23.1 to 23.2: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=helm). +1. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade for Helm](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=helm). ### Tanka deployment @@ -82,10 +82,10 @@ values in the *Leader* and *Followers* Tanka configurations, namely the new imag CockroachDB requires to upgrade one minor version at a time, therefore the following migrations have to be performed: 1. 21.2.7 to 22.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.1/upgrade-cockroachdb-kubernetes?filters=manual). -2. 22.1 to 22.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=manual). -3. 22.2 to 23.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=manual). -4. 23.1 to 23.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=manual). -5. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=manual). +1. 22.1 to 22.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v22.2/upgrade-cockroachdb-kubernetes?filters=manual). +1. 22.2 to 23.1: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroachdb-kubernetes?filters=manual). +1. 23.1 to 23.2: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v23.2/upgrade-cockroachdb-kubernetes?filters=manual). +1. 23.2 to 24.1.3: see [CockroachDB Cluster upgrade with Manual configs](https://www.cockroachlabs.com/docs/v24.1/upgrade-cockroachdb-kubernetes?filters=manual). ## Kubernetes upgrades @@ -105,8 +105,8 @@ Migrations of GKE clusters are managed using terraform. ```terraform kubernetes_version = 1.28 ``` -2. Run `terraform apply`. This operation may take more than 30min. -3. Monitor the upgrade of the nodes in the Google Cloud console. +1. Run `terraform apply`. This operation may take more than 30min. +1. Monitor the upgrade of the nodes in the Google Cloud console. #### 1.26 to 1.27 @@ -114,8 +114,8 @@ Migrations of GKE clusters are managed using terraform. ```terraform kubernetes_version = 1.27 ``` -2. Run `terraform apply`. This operation may take more than 30min. -3. Monitor the upgrade of the nodes in the Google Cloud console. +1. Run `terraform apply`. This operation may take more than 30min. +1. Monitor the upgrade of the nodes in the Google Cloud console. #### 1.25 to 1.26 @@ -123,8 +123,8 @@ Migrations of GKE clusters are managed using terraform. ```terraform kubernetes_version = 1.26 ``` -2. Run `terraform apply` -3. Monitor the upgrade of the nodes in the Google Cloud console. +1. Run `terraform apply` +1. Monitor the upgrade of the nodes in the Google Cloud console. #### 1.24 to 1.25 @@ -132,8 +132,8 @@ Migrations of GKE clusters are managed using terraform. 
```terraform kubernetes_version = 1.25 ``` -2. Run `terraform apply`. This operation may take more than 30min. -3. Monitor the upgrade of the nodes in the Google Cloud console. +1. Run `terraform apply`. This operation may take more than 30min. +1. Monitor the upgrade of the nodes in the Google Cloud console. ### AWS - Elastic Kubernetes Service @@ -146,8 +146,8 @@ expected to be reported in the context of a standard deployment performed with t #### 1.27 to 1.28 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. -2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. -3. Change your `terraform.tfvars` to use `1.28` by adding or updating the `kubernetes_version` variable: +1. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. +1. Change your `terraform.tfvars` to use `1.28` by adding or updating the `kubernetes_version` variable: ```terraform kubernetes_version = 1.28 ``` @@ -155,8 +155,8 @@ expected to be reported in the context of a standard deployment performed with t #### 1.26 to 1.27 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. -2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. -3. Change your `terraform.tfvars` to use `1.27` by adding or updating the `kubernetes_version` variable: +1. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. +1. Change your `terraform.tfvars` to use `1.27` by adding or updating the `kubernetes_version` variable: ```terraform kubernetes_version = 1.27 ``` @@ -164,8 +164,8 @@ expected to be reported in the context of a standard deployment performed with t #### 1.25 to 1.26 1. Upgrade the cluster (control plane) using the AWS console. It should take ~15 minutes. -2. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. -3. Change your `terraform.tfvars` to use `1.26` by adding or updating the `kubernetes_version` variable: +1. Update the *Node Group* in the *Compute* tab with *Rolling Update* strategy to upgrade the nodes using the AWS console. +1. Change your `terraform.tfvars` to use `1.26` by adding or updating the `kubernetes_version` variable: ```terraform kubernetes_version = 1.26 ``` @@ -177,8 +177,8 @@ expected to be reported in the context of a standard deployment performed with t - Evaluate errors in Deprecated APIs removed in Kubernetes v1.25. Using `kubectl get podsecuritypolicies`, check if there is only one *Pod Security Policy* named `eks.privileged`. If it is the case, according to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-security-policy-removal-faq.html), you can proceed. -2. Upgrade the cluster using the AWS console. It should take ~15 minutes. -3. Change your `terraform.tfvars` to use `1.25` by adding or updating the `kubernetes_version` variable: +1. Upgrade the cluster using the AWS console. It should take ~15 minutes. +1. 
Change your `terraform.tfvars` to use `1.25` by adding or updating the `kubernetes_version` variable: ```terraform kubernetes_version = 1.25 ``` From 3f5fbb620bf0f9465481f77333642b1f4a90606e Mon Sep 17 00:00:00 2001 From: Michael Barroco Date: Fri, 30 Aug 2024 15:45:06 +0200 Subject: [PATCH 12/12] small edits --- deploy/MIGRATION.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/MIGRATION.md b/deploy/MIGRATION.md index 09218aa1d..69cb6e993 100644 --- a/deploy/MIGRATION.md +++ b/deploy/MIGRATION.md @@ -36,7 +36,7 @@ align your configuration with the new state of the cluster. If you deployed the DSS using the Helm chart and the instructions provided in this repository, follow the instructions provided by CockroachDB `Cluster Upgrade with Helm` (See specific links below). Note that the CockroachDB documentation -suggest to edit the values using `helm upgrade ... --set` commands. You will need to use the root key `cockroachdb` +suggests to edit the values using `helm upgrade ... --set` commands. You will need to use the root key `cockroachdb` since the cockroachdb Helm chart is a dependency of the dss chart. For instance, setting the image tag and partition using the command line would look like this: ``` @@ -71,7 +71,7 @@ CockroachDB requires to upgrade one minor version at a time, therefore the follo ### Tanka deployment -For deployments using Tanka configuration, since no instructions are provided for tanka specifically, +For deployments using Tanka configuration, since no instructions are provided for Tanka specifically, we recommend to follow the manual steps documented by CockroachDB: `Cluster Upgrade with Manual configs`. (See specific links below) To apply the changes to your cluster, follow the manual steps and reflect the new values in the *Leader* and *Followers* Tanka configurations, namely the new image version (see