diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce0130ef3a..ea85af4394 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: id: kind uses: engineerd/setup-kind@v0.5.0 with: - version: v0.24.0 + version: v0.25.0 - name: Prepare cluster for testing id: local-path diff --git a/docs/CHANGELOG-v2024.11.18.md b/docs/CHANGELOG-v2024.11.18.md new file mode 100644 index 0000000000..7db8c288db --- /dev/null +++ b/docs/CHANGELOG-v2024.11.18.md @@ -0,0 +1,787 @@ +--- +title: Changelog | KubeDB +description: Changelog +menu: + docs_{{.version}}: + identifier: changelog-kubedb-v2024.11.18 + name: Changelog-v2024.11.18 + parent: welcome + weight: 20241118 +product_name: kubedb +menu_name: docs_{{.version}} +section_menu_id: welcome +url: /docs/{{.version}}/welcome/changelog-v2024.11.18/ +aliases: + - /docs/{{.version}}/CHANGELOG-v2024.11.18/ +--- + +# KubeDB v2024.11.18 (2024-11-20) + + +## [kubedb/apimachinery](https://github.com/kubedb/apimachinery) + +### [v0.49.0](https://github.com/kubedb/apimachinery/releases/tag/v0.49.0) + + + + +## [kubedb/autoscaler](https://github.com/kubedb/autoscaler) + +### [v0.34.0](https://github.com/kubedb/autoscaler/releases/tag/v0.34.0) + +- [a67ba7eb](https://github.com/kubedb/autoscaler/commit/a67ba7eb) Prepare for release v0.34.0 (#229) +- [0e555ac9](https://github.com/kubedb/autoscaler/commit/0e555ac9) Prepare for release v0.34.0-rc.0 (#228) +- [9a723b6e](https://github.com/kubedb/autoscaler/commit/9a723b6e) Add autoscaler for solr (#227) +- [6fa6ac10](https://github.com/kubedb/autoscaler/commit/6fa6ac10) Use debian:12 base image (#226) +- [7b4b559f](https://github.com/kubedb/autoscaler/commit/7b4b559f) Use debian:12 base image (#225) + + + +## [kubedb/cassandra](https://github.com/kubedb/cassandra) + +### [v0.2.0](https://github.com/kubedb/cassandra/releases/tag/v0.2.0) + +- [a22e35fe](https://github.com/kubedb/cassandra/commit/a22e35fe) Prepare for release v0.2.0 (#12) +- [98f26f87](https://github.com/kubedb/cassandra/commit/98f26f87) Use kind v0.25.0 (#11) +- [2a6da5c4](https://github.com/kubedb/cassandra/commit/2a6da5c4) Add ReconcileState struct to pass reconciling objects as parameter (#9) +- [c8344e76](https://github.com/kubedb/cassandra/commit/c8344e76) Prepare for release v0.2.0-rc.0 (#10) +- [49bea527](https://github.com/kubedb/cassandra/commit/49bea527) Fix Petset observedGeneration issue & Add Monitoring Support (#7) +- [08e51996](https://github.com/kubedb/cassandra/commit/08e51996) Use debian:12 base image (#8) + + + +## [kubedb/cli](https://github.com/kubedb/cli) + +### [v0.49.0](https://github.com/kubedb/cli/releases/tag/v0.49.0) + +- [ce5dc68e](https://github.com/kubedb/cli/commit/ce5dc68e) Prepare for release v0.49.0 (#781) +- [a10a51fa](https://github.com/kubedb/cli/commit/a10a51fa) Prepare for release v0.49.0-rc.0 (#780) + + + +## [kubedb/clickhouse](https://github.com/kubedb/clickhouse) + +### [v0.4.0](https://github.com/kubedb/clickhouse/releases/tag/v0.4.0) + +- [7b61f24f](https://github.com/kubedb/clickhouse/commit/7b61f24f) Prepare for release v0.4.0 (#26) +- [4c83f38b](https://github.com/kubedb/clickhouse/commit/4c83f38b) Use kind v0.25.0 (#25) +- [4710110d](https://github.com/kubedb/clickhouse/commit/4710110d) Add ReconcileState struct to pass reconciling objects as parameter (#23) +- [c3e50828](https://github.com/kubedb/clickhouse/commit/c3e50828) Prepare for release v0.4.0-rc.0 (#24) +- [8dd25781](https://github.com/kubedb/clickhouse/commit/8dd25781) Use debian:12 base image 
(#21) + + + +## [kubedb/crd-manager](https://github.com/kubedb/crd-manager) + +### [v0.4.0](https://github.com/kubedb/crd-manager/releases/tag/v0.4.0) + +- [de8bb2d1](https://github.com/kubedb/crd-manager/commit/de8bb2d1) Prepare for release v0.4.0 (#55) +- [71f313ce](https://github.com/kubedb/crd-manager/commit/71f313ce) Prepare for release v0.4.0-rc.0 (#54) +- [27c3d99b](https://github.com/kubedb/crd-manager/commit/27c3d99b) Use debian:12 base image (#53) +- [f45b9afd](https://github.com/kubedb/crd-manager/commit/f45b9afd) Use debian:12 base image (#52) + + + +## [kubedb/dashboard-restic-plugin](https://github.com/kubedb/dashboard-restic-plugin) + +### [v0.7.0](https://github.com/kubedb/dashboard-restic-plugin/releases/tag/v0.7.0) + +- [75460de](https://github.com/kubedb/dashboard-restic-plugin/commit/75460de) Prepare for release v0.7.0 (#25) +- [b3a729b](https://github.com/kubedb/dashboard-restic-plugin/commit/b3a729b) Use debian:12 base image (#24) +- [7672ced](https://github.com/kubedb/dashboard-restic-plugin/commit/7672ced) Prepare for release v0.7.0-rc.0 (#23) +- [504cae4](https://github.com/kubedb/dashboard-restic-plugin/commit/504cae4) Use debian:12 base image (#22) + + + +## [kubedb/db-client-go](https://github.com/kubedb/db-client-go) + +### [v0.4.0](https://github.com/kubedb/db-client-go/releases/tag/v0.4.0) + +- [1138af27](https://github.com/kubedb/db-client-go/commit/1138af27) Prepare for release v0.4.0 (#152) +- [25a03892](https://github.com/kubedb/db-client-go/commit/25a03892) Add Support for Updating Druid Credential dynamically (#148) +- [e5ed4c59](https://github.com/kubedb/db-client-go/commit/e5ed4c59) Use pointer clientTLS filed (#151) +- [bc1b92c5](https://github.com/kubedb/db-client-go/commit/bc1b92c5) Prepare for release v0.4.0-rc.0 (#150) +- [e8210401](https://github.com/kubedb/db-client-go/commit/e8210401) Update Cassandra/client.go (#143) +- [8f470859](https://github.com/kubedb/db-client-go/commit/8f470859) adding tls for pgbouncer (#149) +- [7e70d363](https://github.com/kubedb/db-client-go/commit/7e70d363) Fix druid auth name (#147) +- [234b5778](https://github.com/kubedb/db-client-go/commit/234b5778) Update apimachinery Dependency (#146) +- [8b8905b9](https://github.com/kubedb/db-client-go/commit/8b8905b9) Update deps & fix authsecret mutation (#145) +- [db011c56](https://github.com/kubedb/db-client-go/commit/db011c56) Add TLS config to druid client (#139) +- [fbbd601a](https://github.com/kubedb/db-client-go/commit/fbbd601a) Use debian:12 base image (#144) +- [7b28debe](https://github.com/kubedb/db-client-go/commit/7b28debe) Add DeleteCollection method for Solr (#141) + + + +## [kubedb/druid](https://github.com/kubedb/druid) + +### [v0.4.0](https://github.com/kubedb/druid/releases/tag/v0.4.0) + +- [00c82ce5](https://github.com/kubedb/druid/commit/00c82ce5) Prepare for release v0.4.0 (#60) +- [bbdc017f](https://github.com/kubedb/druid/commit/bbdc017f) Update RotateAuth Ops based changes (#58) +- [a8ef93f3](https://github.com/kubedb/druid/commit/a8ef93f3) Use kind v0.25.0 (#59) +- [fbfafa4c](https://github.com/kubedb/druid/commit/fbfafa4c) Update Structure of ReconcileState (#57) +- [7826e04a](https://github.com/kubedb/druid/commit/7826e04a) Update dep (#56) +- [3cb337c4](https://github.com/kubedb/druid/commit/3cb337c4) Add Druid ReconcileState as receiver from Reconcile (#54) +- [73e2f14b](https://github.com/kubedb/druid/commit/73e2f14b) Prepare for release v0.4.0-rc.0 (#55) +- [cb3fc57a](https://github.com/kubedb/druid/commit/cb3fc57a) Fix druid auth secret name 
(#52) +- [20da2c3a](https://github.com/kubedb/druid/commit/20da2c3a) Add druid TLS (#45) +- [9f72c688](https://github.com/kubedb/druid/commit/9f72c688) Use debian:12 base image (#51) + + + +## [kubedb/elasticsearch](https://github.com/kubedb/elasticsearch) + +### [v0.49.0](https://github.com/kubedb/elasticsearch/releases/tag/v0.49.0) + +- [32a036e1](https://github.com/kubedb/elasticsearch/commit/32a036e19) Prepare for release v0.49.0 (#740) +- [a10a337e](https://github.com/kubedb/elasticsearch/commit/a10a337e3) Fix authsecret name and validator to configure rotateauth ops request (#739) +- [d3aaacf3](https://github.com/kubedb/elasticsearch/commit/d3aaacf37) Prepare for release v0.49.0-rc.0 (#738) +- [57de6b5e](https://github.com/kubedb/elasticsearch/commit/57de6b5e6) Fix local Webhook Registration (#737) +- [494c5f03](https://github.com/kubedb/elasticsearch/commit/494c5f03b) Use debian:12 base image (#736) +- [986f6f22](https://github.com/kubedb/elasticsearch/commit/986f6f224) Use debian:12 base image (#735) + + + +## [kubedb/elasticsearch-restic-plugin](https://github.com/kubedb/elasticsearch-restic-plugin) + +### [v0.12.0](https://github.com/kubedb/elasticsearch-restic-plugin/releases/tag/v0.12.0) + +- [1d9e4a2](https://github.com/kubedb/elasticsearch-restic-plugin/commit/1d9e4a2) Prepare for release v0.12.0 (#48) +- [dd617de](https://github.com/kubedb/elasticsearch-restic-plugin/commit/dd617de) Use debian:12 base image (#47) +- [3e60416](https://github.com/kubedb/elasticsearch-restic-plugin/commit/3e60416) Prepare for release v0.12.0-rc.0 (#46) + + + +## [kubedb/ferretdb](https://github.com/kubedb/ferretdb) + +### [v0.4.0](https://github.com/kubedb/ferretdb/releases/tag/v0.4.0) + +- [c955d034](https://github.com/kubedb/ferretdb/commit/c955d034) Prepare for release v0.4.0 (#52) +- [f79f7989](https://github.com/kubedb/ferretdb/commit/f79f7989) Update deps (#51) +- [41f5baba](https://github.com/kubedb/ferretdb/commit/41f5baba) Add ReconcileState as receiver from Reconcile (#50) +- [46e66e54](https://github.com/kubedb/ferretdb/commit/46e66e54) Prepare for release v0.4.0-rc.0 (#49) +- [d6dbb82a](https://github.com/kubedb/ferretdb/commit/d6dbb82a) Update pg tp v1 (#47) +- [498cf1e3](https://github.com/kubedb/ferretdb/commit/498cf1e3) Use debian:12 base image (#46) + + + +## [kubedb/installer](https://github.com/kubedb/installer) + +### [v2024.11.18](https://github.com/kubedb/installer/releases/tag/v2024.11.18) + + + + +## [kubedb/kafka](https://github.com/kubedb/kafka) + +### [v0.20.0](https://github.com/kubedb/kafka/releases/tag/v0.20.0) + +- [cff9ac07](https://github.com/kubedb/kafka/commit/cff9ac07) Prepare for release v0.20.0 (#120) +- [122c28db](https://github.com/kubedb/kafka/commit/122c28db) Use kind v0.25.0 (#119) +- [03868d66](https://github.com/kubedb/kafka/commit/03868d66) Update annotations name with constants (#117) +- [0cfb8df3](https://github.com/kubedb/kafka/commit/0cfb8df3) Update RotateAuth Ops based changes (#112) +- [f767cdbf](https://github.com/kubedb/kafka/commit/f767cdbf) Add Kafka ReconcileState as receiver from Reconcile (#114) +- [9a007bb4](https://github.com/kubedb/kafka/commit/9a007bb4) Install monitoring stuffs in the daily (#113) +- [c80ff7a2](https://github.com/kubedb/kafka/commit/c80ff7a2) Prepare for release v0.20.0-rc.0 (#116) +- [96e3de84](https://github.com/kubedb/kafka/commit/96e3de84) Prepare for release v0.42.0-rc.0 +- [6f702d94](https://github.com/kubedb/kafka/commit/6f702d94) Fix breaking helper functions (#115) +- 
[7e43c33f](https://github.com/kubedb/kafka/commit/7e43c33f) Use debian:12 base image (#111) + + + +## [kubedb/kibana](https://github.com/kubedb/kibana) + +### [v0.25.0](https://github.com/kubedb/kibana/releases/tag/v0.25.0) + +- [f2a01a30](https://github.com/kubedb/kibana/commit/f2a01a30) Prepare for release v0.25.0 (#131) +- [772026c0](https://github.com/kubedb/kibana/commit/772026c0) Prepare for release v0.25.0-rc.0 (#130) +- [ffd6e402](https://github.com/kubedb/kibana/commit/ffd6e402) Use debian:12 base image (#129) +- [0dc26c52](https://github.com/kubedb/kibana/commit/0dc26c52) Use debian:12 base image (#128) + + + +## [kubedb/kubedb-manifest-plugin](https://github.com/kubedb/kubedb-manifest-plugin) + +### [v0.12.0](https://github.com/kubedb/kubedb-manifest-plugin/releases/tag/v0.12.0) + +- [2dcc07c](https://github.com/kubedb/kubedb-manifest-plugin/commit/2dcc07c) Prepare for release v0.12.0 (#78) +- [dce514e](https://github.com/kubedb/kubedb-manifest-plugin/commit/dce514e) Use debian:12 base image (#77) +- [7021daa](https://github.com/kubedb/kubedb-manifest-plugin/commit/7021daa) Prepare for release v0.12.0-rc.0 (#76) +- [bea1697](https://github.com/kubedb/kubedb-manifest-plugin/commit/bea1697) Use debian:12 base image (#75) +- [dd34241](https://github.com/kubedb/kubedb-manifest-plugin/commit/dd34241) Add Manifest backup/restore support for redis (#74) + + + +## [kubedb/mariadb](https://github.com/kubedb/mariadb) + +### [v0.33.0](https://github.com/kubedb/mariadb/releases/tag/v0.33.0) + +- [9435f6ee](https://github.com/kubedb/mariadb/commit/9435f6eec) Prepare for release v0.33.0 (#291) +- [db75a3f5](https://github.com/kubedb/mariadb/commit/db75a3f5e) Add all required const for archiver (#289) +- [b73a732a](https://github.com/kubedb/mariadb/commit/b73a732ad) Prepare for release v0.33.0-rc.0 (#290) +- [bd9c114a](https://github.com/kubedb/mariadb/commit/bd9c114a4) Use debian:12 base image (#288) + + + +## [kubedb/mariadb-archiver](https://github.com/kubedb/mariadb-archiver) + +### [v0.9.0](https://github.com/kubedb/mariadb-archiver/releases/tag/v0.9.0) + +- [c645ef42](https://github.com/kubedb/mariadb-archiver/commit/c645ef42) Prepare for release v0.9.0 (#31) +- [e661da41](https://github.com/kubedb/mariadb-archiver/commit/e661da41) Add all required const for archiver (#29) +- [8498c843](https://github.com/kubedb/mariadb-archiver/commit/8498c843) Prepare for release v0.9.0-rc.0 (#30) + + + +## [kubedb/mariadb-coordinator](https://github.com/kubedb/mariadb-coordinator) + +### [v0.29.0](https://github.com/kubedb/mariadb-coordinator/releases/tag/v0.29.0) + +- [eae4bd98](https://github.com/kubedb/mariadb-coordinator/commit/eae4bd98) Prepare for release v0.29.0 (#130) +- [f1a7ed08](https://github.com/kubedb/mariadb-coordinator/commit/f1a7ed08) Prepare for release v0.29.0-rc.0 (#129) +- [896d4206](https://github.com/kubedb/mariadb-coordinator/commit/896d4206) Use debian:12 base image (#128) + + + +## [kubedb/mariadb-csi-snapshotter-plugin](https://github.com/kubedb/mariadb-csi-snapshotter-plugin) + +### [v0.9.0](https://github.com/kubedb/mariadb-csi-snapshotter-plugin/releases/tag/v0.9.0) + +- [3096d80](https://github.com/kubedb/mariadb-csi-snapshotter-plugin/commit/3096d80) Prepare for release v0.9.0 (#34) +- [92594f3](https://github.com/kubedb/mariadb-csi-snapshotter-plugin/commit/92594f3) Prepare for release v0.9.0-rc.0 (#33) +- [cdc664f](https://github.com/kubedb/mariadb-csi-snapshotter-plugin/commit/cdc664f) Use debian:12 base image (#32) + + + +## 
[kubedb/mariadb-restic-plugin](https://github.com/kubedb/mariadb-restic-plugin) + +### [v0.7.0](https://github.com/kubedb/mariadb-restic-plugin/releases/tag/v0.7.0) + +- [bf019c5](https://github.com/kubedb/mariadb-restic-plugin/commit/bf019c5) Prepare for release v0.7.0 (#30) +- [8583fb5](https://github.com/kubedb/mariadb-restic-plugin/commit/8583fb5) Use debian:12 base image (#29) +- [d9af547](https://github.com/kubedb/mariadb-restic-plugin/commit/d9af547) Prepare for release v0.7.0-rc.0 (#28) +- [610dab6](https://github.com/kubedb/mariadb-restic-plugin/commit/610dab6) Add external databases backup/restore support (#27) + + + +## [kubedb/memcached](https://github.com/kubedb/memcached) + +### [v0.42.0](https://github.com/kubedb/memcached/releases/tag/v0.42.0) + + + + +## [kubedb/mongodb](https://github.com/kubedb/mongodb) + +### [v0.42.0](https://github.com/kubedb/mongodb/releases/tag/v0.42.0) + +- [92b2774e](https://github.com/kubedb/mongodb/commit/92b2774e6) Prepare for release v0.42.0 (#666) +- [520bbe55](https://github.com/kubedb/mongodb/commit/520bbe55a) Add `basic-auth-active-from` annotation in auth secret (#665) +- [df91f37e](https://github.com/kubedb/mongodb/commit/df91f37e3) Prepare for release v0.42.0-rc.0 (#664) +- [decf3281](https://github.com/kubedb/mongodb/commit/decf32819) Fix MongoDBArchiver for Minio TLS backend (#659) +- [d1b57cfe](https://github.com/kubedb/mongodb/commit/d1b57cfe0) Use debian:12 base image (#663) + + + +## [kubedb/mongodb-csi-snapshotter-plugin](https://github.com/kubedb/mongodb-csi-snapshotter-plugin) + +### [v0.10.0](https://github.com/kubedb/mongodb-csi-snapshotter-plugin/releases/tag/v0.10.0) + +- [a1eefb7](https://github.com/kubedb/mongodb-csi-snapshotter-plugin/commit/a1eefb7) Prepare for release v0.10.0 (#39) +- [c84b8c0](https://github.com/kubedb/mongodb-csi-snapshotter-plugin/commit/c84b8c0) Prepare for release v0.10.0-rc.0 (#38) +- [bca72d6](https://github.com/kubedb/mongodb-csi-snapshotter-plugin/commit/bca72d6) Use debian:12 base image (#37) +- [780554d](https://github.com/kubedb/mongodb-csi-snapshotter-plugin/commit/780554d) Use debian:12 base image (#36) + + + +## [kubedb/mongodb-restic-plugin](https://github.com/kubedb/mongodb-restic-plugin) + +### [v0.12.0](https://github.com/kubedb/mongodb-restic-plugin/releases/tag/v0.12.0) + +- [b1f1881](https://github.com/kubedb/mongodb-restic-plugin/commit/b1f1881) Prepare for release v0.12.0 (#70) +- [1eb3fe7](https://github.com/kubedb/mongodb-restic-plugin/commit/1eb3fe7) Add support for MongoDB version 8.0.0 (#67) +- [09ee7f4](https://github.com/kubedb/mongodb-restic-plugin/commit/09ee7f4) Use debian:12 base image (#69) +- [3f6d291](https://github.com/kubedb/mongodb-restic-plugin/commit/3f6d291) Prepare for release v0.12.0-rc.0 (#68) + + + +## [kubedb/mssql-coordinator](https://github.com/kubedb/mssql-coordinator) + +### [v0.4.0](https://github.com/kubedb/mssql-coordinator/releases/tag/v0.4.0) + +- [9e567ec3](https://github.com/kubedb/mssql-coordinator/commit/9e567ec3) Prepare for release v0.4.0 (#21) +- [ae1c7c69](https://github.com/kubedb/mssql-coordinator/commit/ae1c7c69) Prepare for release v0.4.0-rc.0 (#20) +- [395372a7](https://github.com/kubedb/mssql-coordinator/commit/395372a7) Check AG Database exists before resume, clean LSN string (#18) +- [505f0d8e](https://github.com/kubedb/mssql-coordinator/commit/505f0d8e) Use debian:12 base image (#19) + + + +## [kubedb/mssqlserver](https://github.com/kubedb/mssqlserver) + +### [v0.4.0](https://github.com/kubedb/mssqlserver/releases/tag/v0.4.0) + 
+- [034c2045](https://github.com/kubedb/mssqlserver/commit/034c2045) Prepare for release v0.4.0 (#40) +- [239a842f](https://github.com/kubedb/mssqlserver/commit/239a842f) Add Reconfigure TLS changes (#38) +- [244921e8](https://github.com/kubedb/mssqlserver/commit/244921e8) Use kind v0.25.0 (#39) +- [e7609913](https://github.com/kubedb/mssqlserver/commit/e7609913) Add ReconcileState struct as reconcile methods receiver (#36) +- [b43a0b19](https://github.com/kubedb/mssqlserver/commit/b43a0b19) Prepare for release v0.4.0-rc.0 (#37) +- [69c1a4de](https://github.com/kubedb/mssqlserver/commit/69c1a4de) Use debian:12 base image (#35) +- [c243e70b](https://github.com/kubedb/mssqlserver/commit/c243e70b) Add some fixes: Validate user envs (#33) +- [39de8531](https://github.com/kubedb/mssqlserver/commit/39de8531) Skip creating extra initial backup session for pitr (#34) + + + +## [kubedb/mssqlserver-archiver](https://github.com/kubedb/mssqlserver-archiver) + +### [v0.3.0](https://github.com/kubedb/mssqlserver-archiver/releases/tag/v0.3.0) + +- [6e67fd4](https://github.com/kubedb/mssqlserver-archiver/commit/6e67fd4) Remove matrix build +- [8ab2080](https://github.com/kubedb/mssqlserver-archiver/commit/8ab2080) Use debian:12 base image (#4) + + + +## [kubedb/mssqlserver-walg-plugin](https://github.com/kubedb/mssqlserver-walg-plugin) + +### [v0.3.0](https://github.com/kubedb/mssqlserver-walg-plugin/releases/tag/v0.3.0) + +- [2364689](https://github.com/kubedb/mssqlserver-walg-plugin/commit/2364689) Prepare for release v0.3.0 (#10) +- [e05e217](https://github.com/kubedb/mssqlserver-walg-plugin/commit/e05e217) Prepare for release v0.3.0-rc.0 (#9) +- [4a9db7b](https://github.com/kubedb/mssqlserver-walg-plugin/commit/4a9db7b) Use debian:12 base image (#8) + + + +## [kubedb/mysql](https://github.com/kubedb/mysql) + +### [v0.42.0](https://github.com/kubedb/mysql/releases/tag/v0.42.0) + +- [a32968b9](https://github.com/kubedb/mysql/commit/a32968b9c) Prepare for release v0.42.0 (#651) +- [de193669](https://github.com/kubedb/mysql/commit/de1936692) Add Multi Primary Mode Support for V8.4.2 (#650) +- [1ab34fa2](https://github.com/kubedb/mysql/commit/1ab34fa2e) Prepare for release v0.42.0-rc.0 (#649) + + + +## [kubedb/mysql-archiver](https://github.com/kubedb/mysql-archiver) + +### [v0.10.0](https://github.com/kubedb/mysql-archiver/releases/tag/v0.10.0) + +- [120c9ffc](https://github.com/kubedb/mysql-archiver/commit/120c9ffc) Prepare for release v0.10.0 (#44) +- [beb27b22](https://github.com/kubedb/mysql-archiver/commit/beb27b22) Prepare for release v0.10.0-rc.0 (#43) + + + +## [kubedb/mysql-coordinator](https://github.com/kubedb/mysql-coordinator) + +### [v0.27.0](https://github.com/kubedb/mysql-coordinator/releases/tag/v0.27.0) + +- [31e0281e](https://github.com/kubedb/mysql-coordinator/commit/31e0281e) Prepare for release v0.27.0 (#128) +- [81d40c27](https://github.com/kubedb/mysql-coordinator/commit/81d40c27) Prepare for release v0.27.0-rc.0 (#127) + + + +## [kubedb/mysql-csi-snapshotter-plugin](https://github.com/kubedb/mysql-csi-snapshotter-plugin) + +### [v0.10.0](https://github.com/kubedb/mysql-csi-snapshotter-plugin/releases/tag/v0.10.0) + +- [8fcb3eb](https://github.com/kubedb/mysql-csi-snapshotter-plugin/commit/8fcb3eb) Prepare for release v0.10.0 (#34) +- [be7cf34](https://github.com/kubedb/mysql-csi-snapshotter-plugin/commit/be7cf34) Prepare for release v0.10.0-rc.0 (#33) + + + +## [kubedb/mysql-restic-plugin](https://github.com/kubedb/mysql-restic-plugin) + +### 
[v0.12.0](https://github.com/kubedb/mysql-restic-plugin/releases/tag/v0.12.0) + +- [e08ce0c](https://github.com/kubedb/mysql-restic-plugin/commit/e08ce0c) Prepare for release v0.12.0 (#62) +- [54c8bc8](https://github.com/kubedb/mysql-restic-plugin/commit/54c8bc8) Use debian:12 base image (#60) +- [310725e](https://github.com/kubedb/mysql-restic-plugin/commit/310725e) Prepare for release v0.12.0-rc.0 (#59) + + + +## [kubedb/mysql-router-init](https://github.com/kubedb/mysql-router-init) + +### [v0.27.0](https://github.com/kubedb/mysql-router-init/releases/tag/v0.27.0) + +- [a0ee1f6](https://github.com/kubedb/mysql-router-init/commit/a0ee1f6) Use debian:12 base image (#47) + + + +## [kubedb/ops-manager](https://github.com/kubedb/ops-manager) + +### [v0.36.0](https://github.com/kubedb/ops-manager/releases/tag/v0.36.0) + +- [8777963d](https://github.com/kubedb/ops-manager/commit/8777963df) Prepare for release v0.36.0 (#672) +- [6c406557](https://github.com/kubedb/ops-manager/commit/6c4065573) Add rotateauth ops request for elasticsearch (#671) +- [c2e7bdc2](https://github.com/kubedb/ops-manager/commit/c2e7bdc23) Restart For PgBouncer (#651) +- [e0357528](https://github.com/kubedb/ops-manager/commit/e03575284) Add rotateauth ops request for solr (#670) +- [9480d0dd](https://github.com/kubedb/ops-manager/commit/9480d0ddc) Add Druid Rotate-Auth OpsRequest (#653) +- [26d26e0f](https://github.com/kubedb/ops-manager/commit/26d26e0f5) Add RotateAuth support for MongoDBOpsRequest (#664) +- [4ab4f623](https://github.com/kubedb/ops-manager/commit/4ab4f6233) Add mssqlserver ops request reconfigure tls (#652) +- [482ced11](https://github.com/kubedb/ops-manager/commit/482ced11d) Add Rotate Auth Secret Support for Postgres and fix arbiter spec (#669) +- [a35a08e5](https://github.com/kubedb/ops-manager/commit/a35a08e5a) Add Memcached TLS and OpsRequest Reconfigure TLS (#654) +- [fe04070b](https://github.com/kubedb/ops-manager/commit/fe04070b4) Update Pgpool operator reconcile struct changes (#668) +- [29edd77d](https://github.com/kubedb/ops-manager/commit/29edd77d5) Update SingleStore for ReconcilerState Change (#666) +- [53a7941a](https://github.com/kubedb/ops-manager/commit/53a7941a2) Fix Keystore Secret reference for Kafka ReconfigureTLS (#663) +- [b01a8869](https://github.com/kubedb/ops-manager/commit/b01a88698) Add Validator for Druid Ops-Requests (#665) +- [8275c2a5](https://github.com/kubedb/ops-manager/commit/8275c2a51) Updates for Druid ReconcileState Structure (#667) +- [545bd0c0](https://github.com/kubedb/ops-manager/commit/545bd0c08) Add Kafka Rotate Auth Ops Request (#644) +- [5bc565e4](https://github.com/kubedb/ops-manager/commit/5bc565e4a) Sync ReconcileState changes for RabbitMQ (#658) +- [fb456387](https://github.com/kubedb/ops-manager/commit/fb456387c) Add MSSQLServer Reconfigure Ops request (#634) +- [03af4451](https://github.com/kubedb/ops-manager/commit/03af4451a) Update Zookeeper for Reconcile Changes (#662) +- [7ab6c269](https://github.com/kubedb/ops-manager/commit/7ab6c2694) Fix Druid ReconcileState Changes (#661) +- [250cd5c2](https://github.com/kubedb/ops-manager/commit/250cd5c22) Update FerretDB operator reconcile struct changes (#655) +- [0b0ad7cd](https://github.com/kubedb/ops-manager/commit/0b0ad7cdf) Add ReconcilerState changes in solr (#659) +- [f0ac65c8](https://github.com/kubedb/ops-manager/commit/f0ac65c84) Fix Kafka ReconcileState Changes (#657) +- [370b9f98](https://github.com/kubedb/ops-manager/commit/370b9f98f) Fix Fastspeed standalone postgres db upgrade issue (#648) 
+- [eb4dfe2f](https://github.com/kubedb/ops-manager/commit/eb4dfe2fe) Prepare for release v0.36.0-rc.0 (#656) +- [a03bb54e](https://github.com/kubedb/ops-manager/commit/a03bb54e3) Update all db deps +- [6b0cf17f](https://github.com/kubedb/ops-manager/commit/6b0cf17fa) Fix recommendation (#642) +- [1b049295](https://github.com/kubedb/ops-manager/commit/1b0492953) Add support for Druid TLS and Ops-Requests (#629) +- [e72ae41d](https://github.com/kubedb/ops-manager/commit/e72ae41dc) Fix reconfigure tls mg patch issue (#650) +- [5d328f00](https://github.com/kubedb/ops-manager/commit/5d328f00b) Use debian:12 base image (#647) +- [1f32a6cf](https://github.com/kubedb/ops-manager/commit/1f32a6cf0) Use debian:12 base image (#646) +- [3d69bbcc](https://github.com/kubedb/ops-manager/commit/3d69bbcc7) Update zk deps (#645) +- [c71245eb](https://github.com/kubedb/ops-manager/commit/c71245eb4) Add TLS for ZooKeeper (#640) +- [0624ed34](https://github.com/kubedb/ops-manager/commit/0624ed346) Update deps (#643) +- [0c2abc62](https://github.com/kubedb/ops-manager/commit/0c2abc62f) Fix Ops Request Reconfiguration and Version Update (#641) + + + +## [kubedb/percona-xtradb](https://github.com/kubedb/percona-xtradb) + +### [v0.36.0](https://github.com/kubedb/percona-xtradb/releases/tag/v0.36.0) + +- [c32a5a7b](https://github.com/kubedb/percona-xtradb/commit/c32a5a7bf) Prepare for release v0.36.0 (#384) +- [de5d228a](https://github.com/kubedb/percona-xtradb/commit/de5d228ac) Prepare for release v0.36.0-rc.0 (#383) +- [23f321d4](https://github.com/kubedb/percona-xtradb/commit/23f321d42) Use debian:12 base image (#382) + + + +## [kubedb/percona-xtradb-coordinator](https://github.com/kubedb/percona-xtradb-coordinator) + +### [v0.22.0](https://github.com/kubedb/percona-xtradb-coordinator/releases/tag/v0.22.0) + +- [00f8797c](https://github.com/kubedb/percona-xtradb-coordinator/commit/00f8797c) Prepare for release v0.22.0 (#84) +- [23075738](https://github.com/kubedb/percona-xtradb-coordinator/commit/23075738) Prepare for release v0.22.0-rc.0 (#83) +- [86853e65](https://github.com/kubedb/percona-xtradb-coordinator/commit/86853e65) Use debian:12 base image (#82) + + + +## [kubedb/pg-coordinator](https://github.com/kubedb/pg-coordinator) + +### [v0.33.0](https://github.com/kubedb/pg-coordinator/releases/tag/v0.33.0) + +- [5fc9ab78](https://github.com/kubedb/pg-coordinator/commit/5fc9ab78) Prepare for release v0.33.0 (#177) +- [f8d96212](https://github.com/kubedb/pg-coordinator/commit/f8d96212) Prepare for release v0.33.0-rc.0 (#176) +- [0e372546](https://github.com/kubedb/pg-coordinator/commit/0e372546) Add PITR modes support (#175) +- [87ab3dfe](https://github.com/kubedb/pg-coordinator/commit/87ab3dfe) Use debian:12 base image (#174) + + + +## [kubedb/pgbouncer](https://github.com/kubedb/pgbouncer) + +### [v0.36.0](https://github.com/kubedb/pgbouncer/releases/tag/v0.36.0) + +- [34d3b17f](https://github.com/kubedb/pgbouncer/commit/34d3b17f) Prepare for release v0.36.0 (#351) +- [5b075c85](https://github.com/kubedb/pgbouncer/commit/5b075c85) Prepare for release v0.36.0-rc.0 (#350) +- [5780e16a](https://github.com/kubedb/pgbouncer/commit/5780e16a) Use debian:12 base image (#348) + + + +## [kubedb/pgpool](https://github.com/kubedb/pgpool) + +### [v0.4.0](https://github.com/kubedb/pgpool/releases/tag/v0.4.0) + +- [3d42bf8c](https://github.com/kubedb/pgpool/commit/3d42bf8c) Prepare for release v0.4.0 (#53) +- [5e4336fe](https://github.com/kubedb/pgpool/commit/5e4336fe) Reconciler state (#52) +- 
[e2bfcc3a](https://github.com/kubedb/pgpool/commit/e2bfcc3a) Update dep (#51) +- [5422557f](https://github.com/kubedb/pgpool/commit/5422557f) Prepare for release v0.4.0-rc.0 (#50) +- [b1e214ca](https://github.com/kubedb/pgpool/commit/b1e214ca) Use debian:12 base image (#49) + + + +## [kubedb/postgres](https://github.com/kubedb/postgres) + +### [v0.49.0](https://github.com/kubedb/postgres/releases/tag/v0.49.0) + +- [51becf1b](https://github.com/kubedb/postgres/commit/51becf1bb) Prepare for release v0.49.0 (#770) +- [0fee8ec5](https://github.com/kubedb/postgres/commit/0fee8ec5c) Fix arbiter spec on odd replicas and support for rotate auth secret with activeFrom api (#766) +- [990c6130](https://github.com/kubedb/postgres/commit/990c6130e) Use separate steps for Autoscaler, Healthchecker, Customization profile in Daily testing (#767) +- [887ad3de](https://github.com/kubedb/postgres/commit/887ad3dee) Fix RunAsGroup (#768) +- [cda07c6c](https://github.com/kubedb/postgres/commit/cda07c6ca) Fix daily CI name +- [a40ef294](https://github.com/kubedb/postgres/commit/a40ef2949) Add Exporters test prerequisites in Daily CI (#765) +- [a8a62aea](https://github.com/kubedb/postgres/commit/a8a62aea3) Prepare for release v0.49.0-rc.0 (#764) +- [c39cd034](https://github.com/kubedb/postgres/commit/c39cd034f) Fix cross region restore issue (#763) +- [be0d9cba](https://github.com/kubedb/postgres/commit/be0d9cba1) Use debian:12 base image (#761) + + + +## [kubedb/postgres-archiver](https://github.com/kubedb/postgres-archiver) + +### [v0.10.0](https://github.com/kubedb/postgres-archiver/releases/tag/v0.10.0) + +- [03d67011](https://github.com/kubedb/postgres-archiver/commit/03d67011) Prepare for release v0.10.0 (#42) +- [9fa2492a](https://github.com/kubedb/postgres-archiver/commit/9fa2492a) Prepare for release v0.10.0-rc.0 (#41) + + + +## [kubedb/postgres-csi-snapshotter-plugin](https://github.com/kubedb/postgres-csi-snapshotter-plugin) + +### [v0.10.0](https://github.com/kubedb/postgres-csi-snapshotter-plugin/releases/tag/v0.10.0) + +- [08fe128](https://github.com/kubedb/postgres-csi-snapshotter-plugin/commit/08fe128) Prepare for release v0.10.0 (#42) +- [a48321d](https://github.com/kubedb/postgres-csi-snapshotter-plugin/commit/a48321d) Prepare for release v0.10.0-rc.0 (#41) +- [1571bab](https://github.com/kubedb/postgres-csi-snapshotter-plugin/commit/1571bab) Use debian:12 base image (#40) +- [7393bf4](https://github.com/kubedb/postgres-csi-snapshotter-plugin/commit/7393bf4) Use debian:12 base image (#39) + + + +## [kubedb/postgres-restic-plugin](https://github.com/kubedb/postgres-restic-plugin) + +### [v0.12.0](https://github.com/kubedb/postgres-restic-plugin/releases/tag/v0.12.0) + +- [b71f30a](https://github.com/kubedb/postgres-restic-plugin/commit/b71f30a) Prepare for release v0.12.0 (#57) +- [77b0c1b](https://github.com/kubedb/postgres-restic-plugin/commit/77b0c1b) Use debian:12 base image (#56) +- [f744dce](https://github.com/kubedb/postgres-restic-plugin/commit/f744dce) Prepare for release v0.12.0-rc.0 (#55) + + + +## [kubedb/provider-aws](https://github.com/kubedb/provider-aws) + +### [v0.11.0](https://github.com/kubedb/provider-aws/releases/tag/v0.11.0) + + + + +## [kubedb/provider-azure](https://github.com/kubedb/provider-azure) + +### [v0.11.0](https://github.com/kubedb/provider-azure/releases/tag/v0.11.0) + + + + +## [kubedb/provider-gcp](https://github.com/kubedb/provider-gcp) + +### [v0.11.0](https://github.com/kubedb/provider-gcp/releases/tag/v0.11.0) + + + + +## 
[kubedb/provisioner](https://github.com/kubedb/provisioner) + +### [v0.49.0](https://github.com/kubedb/provisioner/releases/tag/v0.49.0) + +- [3e2cb353](https://github.com/kubedb/provisioner/commit/3e2cb3538) Prepare for release v0.49.0 (#122) +- [1fec3354](https://github.com/kubedb/provisioner/commit/1fec33545) Prepare for release v0.49.0-rc.0 (#121) +- [ad392149](https://github.com/kubedb/provisioner/commit/ad392149a) Update all db deps + + + +## [kubedb/proxysql](https://github.com/kubedb/proxysql) + +### [v0.36.0](https://github.com/kubedb/proxysql/releases/tag/v0.36.0) + +- [195ba40d](https://github.com/kubedb/proxysql/commit/195ba40da) Prepare for release v0.36.0 (#366) +- [bce09dcf](https://github.com/kubedb/proxysql/commit/bce09dcfe) Use kind v0.25.0 (#365) +- [a6c29b60](https://github.com/kubedb/proxysql/commit/a6c29b60b) Prepare for release v0.36.0-rc.0 (#364) +- [ad781b1e](https://github.com/kubedb/proxysql/commit/ad781b1ec) Use debian:12 base image (#362) + + + +## [kubedb/rabbitmq](https://github.com/kubedb/rabbitmq) + +### [v0.4.0](https://github.com/kubedb/rabbitmq/releases/tag/v0.4.0) + +- [9115622a](https://github.com/kubedb/rabbitmq/commit/9115622a) Prepare for release v0.4.0 (#54) +- [0b692417](https://github.com/kubedb/rabbitmq/commit/0b692417) Use kind v0.25.0 (#53) +- [0209b44a](https://github.com/kubedb/rabbitmq/commit/0209b44a) Add ReconcileState as receiver from Reconcile (#52) +- [3eabf7e9](https://github.com/kubedb/rabbitmq/commit/3eabf7e9) Prepare for release v0.4.0-rc.0 (#51) +- [bb92e142](https://github.com/kubedb/rabbitmq/commit/bb92e142) Revert "Add ReconcileState struct as reconcile methods receiver (#50)" +- [208e2ebc](https://github.com/kubedb/rabbitmq/commit/208e2ebc) Add ReconcileState struct as reconcile methods receiver (#50) +- [c62f41c0](https://github.com/kubedb/rabbitmq/commit/c62f41c0) Use debian:12 base image (#49) + + + +## [kubedb/redis](https://github.com/kubedb/redis) + +### [v0.42.0](https://github.com/kubedb/redis/releases/tag/v0.42.0) + +- [67b02743](https://github.com/kubedb/redis/commit/67b027437) Prepare for release v0.42.0 (#565) +- [1869d651](https://github.com/kubedb/redis/commit/1869d6514) Prepare for release v0.42.0-rc.0 (#564) +- [60db928f](https://github.com/kubedb/redis/commit/60db928f4) Use debian:12 base image (#563) + + + +## [kubedb/redis-coordinator](https://github.com/kubedb/redis-coordinator) + +### [v0.28.0](https://github.com/kubedb/redis-coordinator/releases/tag/v0.28.0) + +- [c1a3dfa7](https://github.com/kubedb/redis-coordinator/commit/c1a3dfa7) Prepare for release v0.28.0 (#115) +- [9bbeb8af](https://github.com/kubedb/redis-coordinator/commit/9bbeb8af) Prepare for release v0.28.0-rc.0 (#114) +- [81465dcd](https://github.com/kubedb/redis-coordinator/commit/81465dcd) Use debian:12 base image (#113) + + + +## [kubedb/redis-restic-plugin](https://github.com/kubedb/redis-restic-plugin) + +### [v0.12.0](https://github.com/kubedb/redis-restic-plugin/releases/tag/v0.12.0) + +- [1354aef](https://github.com/kubedb/redis-restic-plugin/commit/1354aef) Prepare for release v0.12.0 (#52) +- [5d370f4](https://github.com/kubedb/redis-restic-plugin/commit/5d370f4) Use debian:12 base image (#51) +- [ce0f2c5](https://github.com/kubedb/redis-restic-plugin/commit/ce0f2c5) Prepare for release v0.12.0-rc.0 (#50) + + + +## [kubedb/replication-mode-detector](https://github.com/kubedb/replication-mode-detector) + +### [v0.36.0](https://github.com/kubedb/replication-mode-detector/releases/tag/v0.36.0) + +- 
[a7aa00b6](https://github.com/kubedb/replication-mode-detector/commit/a7aa00b6) Prepare for release v0.36.0 (#281) +- [637b35b0](https://github.com/kubedb/replication-mode-detector/commit/637b35b0) Prepare for release v0.36.0-rc.0 (#280) +- [002dffe1](https://github.com/kubedb/replication-mode-detector/commit/002dffe1) Use debian:12 base image (#279) + + + +## [kubedb/schema-manager](https://github.com/kubedb/schema-manager) + +### [v0.25.0](https://github.com/kubedb/schema-manager/releases/tag/v0.25.0) + +- [86824c75](https://github.com/kubedb/schema-manager/commit/86824c75) Prepare for release v0.25.0 (#125) +- [b96dc475](https://github.com/kubedb/schema-manager/commit/b96dc475) Prepare for release v0.25.0-rc.0 (#124) +- [6b295a6b](https://github.com/kubedb/schema-manager/commit/6b295a6b) Use debian:12 base image (#123) +- [095be7c6](https://github.com/kubedb/schema-manager/commit/095be7c6) Use debian:12 base image (#122) + + + +## [kubedb/singlestore](https://github.com/kubedb/singlestore) + +### [v0.4.0](https://github.com/kubedb/singlestore/releases/tag/v0.4.0) + +- [6882b6b0](https://github.com/kubedb/singlestore/commit/6882b6b0) Prepare for release v0.4.0 (#52) +- [f240110a](https://github.com/kubedb/singlestore/commit/f240110a) Add ReconcileState struct to pass reconciling objects as parameter (#49) +- [c77349d7](https://github.com/kubedb/singlestore/commit/c77349d7) Update dep (#51) +- [143ac48c](https://github.com/kubedb/singlestore/commit/143ac48c) Prepare for release v0.4.0-rc.0 (#50) +- [c6d41c61](https://github.com/kubedb/singlestore/commit/c6d41c61) Use debian:12 base image (#48) + + + +## [kubedb/singlestore-coordinator](https://github.com/kubedb/singlestore-coordinator) + +### [v0.4.0](https://github.com/kubedb/singlestore-coordinator/releases/tag/v0.4.0) + +- [abb99f4](https://github.com/kubedb/singlestore-coordinator/commit/abb99f4) Prepare for release v0.4.0 (#30) +- [2c8c88b](https://github.com/kubedb/singlestore-coordinator/commit/2c8c88b) Prepare for release v0.4.0-rc.0 (#29) +- [6414517](https://github.com/kubedb/singlestore-coordinator/commit/6414517) Use debian:12 base image (#28) + + + +## [kubedb/singlestore-restic-plugin](https://github.com/kubedb/singlestore-restic-plugin) + +### [v0.7.0](https://github.com/kubedb/singlestore-restic-plugin/releases/tag/v0.7.0) + +- [60f3c5a](https://github.com/kubedb/singlestore-restic-plugin/commit/60f3c5a) Prepare for release v0.7.0 (#29) +- [4fa6223](https://github.com/kubedb/singlestore-restic-plugin/commit/4fa6223) Use debian:12 base image (#28) +- [fe8a465](https://github.com/kubedb/singlestore-restic-plugin/commit/fe8a465) Prepare for release v0.7.0-rc.0 (#27) + + + +## [kubedb/solr](https://github.com/kubedb/solr) + +### [v0.4.0](https://github.com/kubedb/solr/releases/tag/v0.4.0) + +- [ec8d69d6](https://github.com/kubedb/solr/commit/ec8d69d6) Prepare for release v0.4.0 (#59) +- [9e1bbf8f](https://github.com/kubedb/solr/commit/9e1bbf8f) Update authconfig secret to implement rotateauth ops request (#58) +- [0e3da42b](https://github.com/kubedb/solr/commit/0e3da42b) Update dep (#57) +- [9203993c](https://github.com/kubedb/solr/commit/9203993c) Add Solr ReconcileState as receiver from Reconcile (#56) +- [b337c65c](https://github.com/kubedb/solr/commit/b337c65c) Prepare for release v0.4.0-rc.0 (#55) +- [71366144](https://github.com/kubedb/solr/commit/71366144) Revert "Add ReconcileState struct to pass reconciling objects as parameter (#54)" +- [ca96f684](https://github.com/kubedb/solr/commit/ca96f684) Add ReconcileState 
struct to pass reconciling objects as parameter (#54) +- [ef5fecdb](https://github.com/kubedb/solr/commit/ef5fecdb) Update deps & fix authsecret mutation (#53) +- [c5fb9263](https://github.com/kubedb/solr/commit/c5fb9263) Use debian:12 base image (#52) + + + +## [kubedb/tests](https://github.com/kubedb/tests) + +### [v0.34.0](https://github.com/kubedb/tests/releases/tag/v0.34.0) + +- [34b0b93f](https://github.com/kubedb/tests/commit/34b0b93f) Prepare for release v0.34.0 (#413) +- [95f5d160](https://github.com/kubedb/tests/commit/95f5d160) Update Solr and MSSQL api (#412) +- [5f40689f](https://github.com/kubedb/tests/commit/5f40689f) Exclude autoscaler,healthchecker,customizations from PG Provisioner test (#404) +- [80fb6025](https://github.com/kubedb/tests/commit/80fb6025) Fix Resource's name (#389) +- [d56b0310](https://github.com/kubedb/tests/commit/d56b0310) Add mysql backup-restore test using kubestash (#344) +- [82a5166b](https://github.com/kubedb/tests/commit/82a5166b) Add Health Check test for Druid, PgBouncer, mysql innodb (#399) +- [a203fd39](https://github.com/kubedb/tests/commit/a203fd39) fix linter issue for release v0.34.0-rc.0 (#402) +- [ec82963a](https://github.com/kubedb/tests/commit/ec82963a) Prepare for release v0.34.0-rc.0 (#401) +- [ddfac099](https://github.com/kubedb/tests/commit/ddfac099) Add Health Check Test (#329) +- [cfb52cc6](https://github.com/kubedb/tests/commit/cfb52cc6) Add Druid Restart Test (#397) +- [69f18704](https://github.com/kubedb/tests/commit/69f18704) Add Druid AutoScaling Test (#396) +- [d846af1b](https://github.com/kubedb/tests/commit/d846af1b) Add Postgres AutoScaling Tests (#364) +- [d67c3869](https://github.com/kubedb/tests/commit/d67c3869) Kubestash Backup-Restore Test for Postgres (#388) +- [4eafce8f](https://github.com/kubedb/tests/commit/4eafce8f) Add Pgpool metrics exporter tests (#377) +- [eaa4499a](https://github.com/kubedb/tests/commit/eaa4499a) Add Postgres metrics exporter tests (#368) +- [564d1cde](https://github.com/kubedb/tests/commit/564d1cde) Add Pgbouncer metrics exporter test (#385) +- [4ffa3df4](https://github.com/kubedb/tests/commit/4ffa3df4) Add Solr metrics exporter test (#390) +- [4183f879](https://github.com/kubedb/tests/commit/4183f879) working well (#381) +- [a3eb7976](https://github.com/kubedb/tests/commit/a3eb7976) Use debian:12 base image (#392) +- [ec4d737f](https://github.com/kubedb/tests/commit/ec4d737f) Add zookeeper metrics exporter test (#391) +- [be48f627](https://github.com/kubedb/tests/commit/be48f627) Add ops manager to CI (#386) +- [fa41180f](https://github.com/kubedb/tests/commit/fa41180f) remove branch from checkout (#380) +- [ac4d8455](https://github.com/kubedb/tests/commit/ac4d8455) Remove extra print (#384) +- [b26e740e](https://github.com/kubedb/tests/commit/b26e740e) Add ElasticSearch metrics exporter tests (#373) +- [f15066fb](https://github.com/kubedb/tests/commit/f15066fb) Add Singlestore metrics exporter tests (#378) +- [f5765a67](https://github.com/kubedb/tests/commit/f5765a67) mysql exporter unfocus (#382) +- [a873ff4a](https://github.com/kubedb/tests/commit/a873ff4a) Add Perconaxtradb metrics exporter tests (#375) +- [dc7c42c9](https://github.com/kubedb/tests/commit/dc7c42c9) Add MySQL metrics exporter tests (#371) +- [46189f39](https://github.com/kubedb/tests/commit/46189f39) Add MariaDB metrics exporter tests (#366) +- [4b4fa18d](https://github.com/kubedb/tests/commit/4b4fa18d) Add PGBouncer update-version, reconfiguration, custom-config and auto-scaling test (#372) +- 
[fe911856](https://github.com/kubedb/tests/commit/fe911856) Add kubestash-Backupstorage Phase (#374) + + + +## [kubedb/ui-server](https://github.com/kubedb/ui-server) + +### [v0.25.0](https://github.com/kubedb/ui-server/releases/tag/v0.25.0) + +- [e6c5e7d8](https://github.com/kubedb/ui-server/commit/e6c5e7d8) Prepare for release v0.25.0 (#139) +- [ee26c6b4](https://github.com/kubedb/ui-server/commit/ee26c6b4) Prepare for release v0.25.0-rc.0 (#138) + + + +## [kubedb/webhook-server](https://github.com/kubedb/webhook-server) + +### [v0.25.0](https://github.com/kubedb/webhook-server/releases/tag/v0.25.0) + +- [4e27b4e3](https://github.com/kubedb/webhook-server/commit/4e27b4e3) Prepare for release v0.25.0 (#136) +- [781984da](https://github.com/kubedb/webhook-server/commit/781984da) Prepare for release v0.25.0-rc.0 (#135) +- [fa8b1b2f](https://github.com/kubedb/webhook-server/commit/fa8b1b2f) Use debian:12 base image (#134) + + + +## [kubedb/zookeeper](https://github.com/kubedb/zookeeper) + +### [v0.4.0](https://github.com/kubedb/zookeeper/releases/tag/v0.4.0) + +- [7de2e276](https://github.com/kubedb/zookeeper/commit/7de2e276) Prepare for release v0.4.0 (#51) +- [4fdb8ff7](https://github.com/kubedb/zookeeper/commit/4fdb8ff7) Use kind v0.25.0 (#50) +- [627161bf](https://github.com/kubedb/zookeeper/commit/627161bf) Fix Resource Name Change (#48) +- [6b1ce481](https://github.com/kubedb/zookeeper/commit/6b1ce481) Update dep (#49) +- [8aed4ed2](https://github.com/kubedb/zookeeper/commit/8aed4ed2) Add ReconcileState as receiver from Reconcile (#47) +- [258b140f](https://github.com/kubedb/zookeeper/commit/258b140f) Prepare for release v0.4.0-rc.0 (#46) +- [04989850](https://github.com/kubedb/zookeeper/commit/04989850) Fix Auth-Secret Name (#45) +- [108a5495](https://github.com/kubedb/zookeeper/commit/108a5495) Use debian:12 base image (#44) +- [f275c44a](https://github.com/kubedb/zookeeper/commit/f275c44a) Add TLS for ZooKeeper (#42) + + + +## [kubedb/zookeeper-restic-plugin](https://github.com/kubedb/zookeeper-restic-plugin) + +### [v0.5.0](https://github.com/kubedb/zookeeper-restic-plugin/releases/tag/v0.5.0) + +- [fff6c0a](https://github.com/kubedb/zookeeper-restic-plugin/commit/fff6c0a) Prepare for release v0.5.0 (#21) +- [eba9823](https://github.com/kubedb/zookeeper-restic-plugin/commit/eba9823) Use debian:12 base image (#20) +- [18c68cd](https://github.com/kubedb/zookeeper-restic-plugin/commit/18c68cd) Prepare for release v0.5.0-rc.0 (#19) +- [e5240ed](https://github.com/kubedb/zookeeper-restic-plugin/commit/e5240ed) Use debian:12 base image (#18) + + + + diff --git a/docs/examples/zookeeper/monitoring/builtin-prom-zk.yaml b/docs/examples/zookeeper/monitoring/builtin-prom-zk.yaml new file mode 100644 index 0000000000..d3b61d559b --- /dev/null +++ b/docs/examples/zookeeper/monitoring/builtin-prom-zk.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zookeeper-builtin-prom + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: standard + accessModes: + - ReadWriteOnce + deletionPolicy: WipeOut + monitor: + agent: prometheus.io/builtin + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s \ No newline at end of file diff --git a/docs/examples/zookeeper/monitoring/prom-config.yaml b/docs/examples/zookeeper/monitoring/prom-config.yaml new file mode 100644 index 0000000000..45aee6317a --- /dev/null +++ b/docs/examples/zookeeper/monitoring/prom-config.yaml @@ 
-0,0 +1,68 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config + labels: + app: prometheus-demo + namespace: monitoring +data: + prometheus.yml: |- + global: + scrape_interval: 5s + evaluation_interval: 5s + scrape_configs: + - job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. + # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. + - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) diff --git a/docs/examples/zookeeper/monitoring/prom-zk.yaml b/docs/examples/zookeeper/monitoring/prom-zk.yaml new file mode 100644 index 0000000000..de720d8b12 --- /dev/null +++ b/docs/examples/zookeeper/monitoring/prom-zk.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zookeeper + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: longhorn + accessModes: + - ReadWriteOnce + deletionPolicy: WipeOut + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfiguration/new-secret.yaml b/docs/examples/zookeeper/reconfiguration/new-secret.yaml new file mode 100644 index 0000000000..78b7ef3694 --- /dev/null +++ b/docs/examples/zookeeper/reconfiguration/new-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +stringData: + zoo.cfg: | + 
maxClientCnxns=100 +kind: Secret +metadata: + name: zk-new-configuration + namespace: demo \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml b/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml new file mode 100644 index 0000000000..0f929f28ff --- /dev/null +++ b/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml @@ -0,0 +1,18 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + configSecret: + name: zk-configuration + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfiguration/secret.yaml b/docs/examples/zookeeper/reconfiguration/secret.yaml new file mode 100644 index 0000000000..3db7af1c97 --- /dev/null +++ b/docs/examples/zookeeper/reconfiguration/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +stringData: + zoo.cfg: | + maxClientCnxns=70 +kind: Secret +metadata: + name: zk-configuration + namespace: demo \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfiguration/zkops-apply-reconfiguration.yaml b/docs/examples/zookeeper/reconfiguration/zkops-apply-reconfiguration.yaml new file mode 100644 index 0000000000..d7ef3c52d3 --- /dev/null +++ b/docs/examples/zookeeper/reconfiguration/zkops-apply-reconfiguration.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-reconfig-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: zk-quickstart + configuration: + applyConfig: + zoo.cfg: | + maxClientCnxns=90 \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml b/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml new file mode 100644 index 0000000000..844046ce08 --- /dev/null +++ b/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-reconfig + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: zk-quickstart + configuration: + configSecret: + name: zk-new-configuration \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zkops-remove.yaml b/docs/examples/zookeeper/reconfigure-tls/zkops-remove.yaml new file mode 100644 index 0000000000..714c89ae57 --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zkops-remove.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + remove: true \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zkops-rotate.yaml b/docs/examples/zookeeper/reconfigure-tls/zkops-rotate.yaml new file mode 100644 index 0000000000..16d11432f4 --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zkops-rotate.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + rotateCertificates: true \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zookeeper-add-tls.yaml b/docs/examples/zookeeper/reconfigure-tls/zookeeper-add-tls.yaml new file mode 100644 index 
0000000000..36542f5c2d --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zookeeper-add-tls.yaml @@ -0,0 +1,23 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + issuerRef: + name: zookeeper-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + subject: + organizations: + - zookeeper + organizationalUnits: + - client + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zookeeper-issuer.yaml b/docs/examples/zookeeper/reconfigure-tls/zookeeper-issuer.yaml new file mode 100644 index 0000000000..5d0f5284be --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zookeeper-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zk-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-ca \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zookeeper-new-issuer.yaml b/docs/examples/zookeeper/reconfigure-tls/zookeeper-new-issuer.yaml new file mode 100644 index 0000000000..f411e6c0f6 --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zookeeper-new-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zk-new-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-new-ca \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zookeeper-update-tls-issuer.yaml b/docs/examples/zookeeper/reconfigure-tls/zookeeper-update-tls-issuer.yaml new file mode 100644 index 0000000000..425f2eea92 --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zookeeper-update-tls-issuer.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + issuerRef: + name: zk-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" \ No newline at end of file diff --git a/docs/examples/zookeeper/reconfigure-tls/zookeeper.yaml b/docs/examples/zookeeper/reconfigure-tls/zookeeper.yaml new file mode 100644 index 0000000000..c121a2d2fd --- /dev/null +++ b/docs/examples/zookeeper/reconfigure-tls/zookeeper.yaml @@ -0,0 +1,16 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" \ No newline at end of file diff --git a/docs/examples/zookeeper/scaling/zk-hscale-down-ops.yaml b/docs/examples/zookeeper/scaling/zk-hscale-down-ops.yaml new file mode 100644 index 0000000000..6edf303de8 --- /dev/null +++ b/docs/examples/zookeeper/scaling/zk-hscale-down-ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: horizontal-scale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: zk-quickstart + horizontalScaling: + replicas: 3 diff --git a/docs/examples/zookeeper/scaling/zk-hscale-up-ops.yaml b/docs/examples/zookeeper/scaling/zk-hscale-up-ops.yaml new file mode 100644 index 0000000000..ea07c7d983 --- /dev/null +++ b/docs/examples/zookeeper/scaling/zk-hscale-up-ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: horizontal-scale-up + 
namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: zk-quickstart + horizontalScaling: + replicas: 5 \ No newline at end of file diff --git a/docs/examples/zookeeper/scaling/zk-vscale.yaml b/docs/examples/zookeeper/scaling/zk-vscale.yaml new file mode 100644 index 0000000000..654b1bbe4c --- /dev/null +++ b/docs/examples/zookeeper/scaling/zk-vscale.yaml @@ -0,0 +1,20 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: vscale + namespace: demo +spec: + databaseRef: + name: zk-quickstart + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 1 + memory: 2Gi + timeout: 5m + apply: IfReady diff --git a/docs/examples/zookeeper/scaling/zookeeper.yaml b/docs/examples/zookeeper/scaling/zookeeper.yaml new file mode 100644 index 0000000000..d996a50c86 --- /dev/null +++ b/docs/examples/zookeeper/scaling/zookeeper.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 4 + storage: + resources: + requests: + storage: "1Gi" + storageClassName: "standard" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" diff --git a/docs/examples/zookeeper/tls/zookeeper-issuer.yaml b/docs/examples/zookeeper/tls/zookeeper-issuer.yaml new file mode 100644 index 0000000000..cee750e080 --- /dev/null +++ b/docs/examples/zookeeper/tls/zookeeper-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zookeeper-ca-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-ca \ No newline at end of file diff --git a/docs/examples/zookeeper/tls/zookeeper-tls.yaml b/docs/examples/zookeeper/tls/zookeeper-tls.yaml new file mode 100644 index 0000000000..eafa468bb7 --- /dev/null +++ b/docs/examples/zookeeper/tls/zookeeper-tls.yaml @@ -0,0 +1,22 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + enableSSL: true + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: zookeeper-ca-issuer + adminServerPort: 8080 + replicas: 5 + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" \ No newline at end of file diff --git a/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-offline.yaml b/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-offline.yaml new file mode 100644 index 0000000000..da8cb7f8a9 --- /dev/null +++ b/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-offline.yaml @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-offline-volume-expansion + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: zk-quickstart + volumeExpansion: + mode: "Offline" + node: 2Gi diff --git a/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-online.yaml b/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-online.yaml new file mode 100644 index 0000000000..ef287faa14 --- /dev/null +++ b/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-online.yaml @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-online-volume-expansion + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: zk-quickstart + volumeExpansion: + mode: "Online" + node: 2Gi diff --git a/docs/examples/zookeeper/volume-expansion/zookeeper.yaml 
b/docs/examples/zookeeper/volume-expansion/zookeeper.yaml new file mode 100644 index 0000000000..7e8f26ca62 --- /dev/null +++ b/docs/examples/zookeeper/volume-expansion/zookeeper.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + storageClassName: "standard" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" \ No newline at end of file diff --git a/docs/guides/zookeeper/monitoring/_index.md b/docs/guides/zookeeper/monitoring/_index.md new file mode 100644 index 0000000000..90c043fcb8 --- /dev/null +++ b/docs/guides/zookeeper/monitoring/_index.md @@ -0,0 +1,10 @@ +--- +title: Monitoring ZooKeeper +menu: + docs_{{ .version }}: + identifier: zk-monitoring-guides + name: Monitoring + parent: zk-zookeeper-guides + weight: 110 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/zookeeper/monitoring/overview.md b/docs/guides/zookeeper/monitoring/overview.md new file mode 100644 index 0000000000..be14f20d04 --- /dev/null +++ b/docs/guides/zookeeper/monitoring/overview.md @@ -0,0 +1,81 @@ +--- +title: ZooKeeper Monitoring Overview +description: ZooKeeper Monitoring Overview +menu: + docs_{{ .version }}: + identifier: zk-monitoring-overview + name: Overview + parent: zk-monitoring-guides + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring ZooKeeper with KubeDB + +KubeDB has native support for monitoring via [Prometheus](https://prometheus.io/). You can use builtin [Prometheus](https://github.com/prometheus/prometheus) scraper or [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) to monitor KubeDB managed databases. This tutorial will show you how database monitoring works with KubeDB and how to configure Database crd to enable monitoring. + +## Overview + +KubeDB uses Prometheus [exporter](https://prometheus.io/docs/instrumenting/exporters/#databases) images to export Prometheus metrics for respective databases. Following diagram shows the logical flow of database monitoring with KubeDB. + +

+  Database Monitoring Flow
+ +When a user creates a database crd with `spec.monitor` section configured, KubeDB operator provisions the respective database and injects an exporter image as sidecar to the database pod. It also creates a dedicated stats service with name `{database-crd-name}-stats` for monitoring. Prometheus server can scrape metrics using this stats service. + +## Configure Monitoring + +In order to enable monitoring for a database, you have to configure `spec.monitor` section. KubeDB provides following options to configure `spec.monitor` section: + +| Field | Type | Uses | +| -------------------------------------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `spec.monitor.agent` | `Required` | Type of the monitoring agent that will be used to monitor this database. It can be `prometheus.io/builtin` or `prometheus.io/operator`. | +| `spec.monitor.prometheus.exporter.port` | `Optional` | Port number where the exporter side car will serve metrics. | +| `spec.monitor.prometheus.exporter.args` | `Optional` | Arguments to pass to the exporter sidecar. | +| `spec.monitor.prometheus.exporter.env` | `Optional` | List of environment variables to set in the exporter sidecar container. | +| `spec.monitor.prometheus.exporter.resources` | `Optional` | Resources required by exporter sidecar container. | +| `spec.monitor.prometheus.exporter.securityContext` | `Optional` | Security options the exporter should run with. | +| `spec.monitor.prometheus.serviceMonitor.labels` | `Optional` | Labels for `ServiceMonitor` crd. | +| `spec.monitor.prometheus.serviceMonitor.interval` | `Optional` | Interval at which metrics should be scraped. | + +## Sample Configuration + +A sample YAML for ZooKeeper crd with `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zookeeper + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: standard + accessModes: + - ReadWriteOnce + deletionPolicy: WipeOut + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s +``` + +Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in databases namespace and this `ServiceMonitor` will have `release: prometheus` label. + + +## Next Steps + +- Learn how to monitor ZooKeeper database with KubeDB using [builtin-Prometheus](/docs/guides/zookeeper/monitoring/using-builtin-prometheus.md) +- Learn how to monitor ZooKeeper database with KubeDB using [Prometheus operator](/docs/guides/zookeeper/monitoring/using-prometheus-operator.md). 
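+
+As a quick sanity check, the wiring described above can be verified directly: KubeDB exposes the exporter through the `{database-crd-name}-stats` service on the configured exporter port. A minimal sketch, assuming the sample object above (`zookeeper` in the `demo` namespace, exporter port `7000`):
+
+```bash
+# The dedicated stats service created by KubeDB for the sample object above
+kubectl get svc -n demo zookeeper-stats
+
+# Port-forward the stats service and fetch a few raw metrics locally
+kubectl port-forward -n demo svc/zookeeper-stats 7000:7000 &
+curl -s http://localhost:7000/metrics | head -n 20
+```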
diff --git a/docs/guides/zookeeper/monitoring/using-builtin-prometheus.md b/docs/guides/zookeeper/monitoring/using-builtin-prometheus.md new file mode 100644 index 0000000000..6746c0a8a8 --- /dev/null +++ b/docs/guides/zookeeper/monitoring/using-builtin-prometheus.md @@ -0,0 +1,370 @@ +--- +title: Monitor ZooKeeper using Builtin Prometheus Discovery +menu: + docs_{{ .version }}: + identifier: zk-using-builtin-prometheus-monitoring + name: Builtin Prometheus + parent: zk-monitoring-guides + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring ZooKeeper with builtin Prometheus + +This tutorial will show you how to monitor ZooKeeper database using builtin [Prometheus](https://github.com/prometheus/prometheus) scraper. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- If you are not familiar with how to configure Prometheus to scrape metrics from various Kubernetes resources, please read the tutorial from [here](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin). + +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/zookeeper/monitoring/overview.md). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. + + ```bash + $ kubectl create ns monitoring + namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/zookeeper](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/zookeeper) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Deploy ZooKeeper with Monitoring Enabled + +At first, let's deploy an ZooKeeper database with monitoring enabled. Below is the ZooKeeper object that we are going to create. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zookeeper-builtin-prom + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: standard + accessModes: + - ReadWriteOnce + deletionPolicy: WipeOut + monitor: + agent: prometheus.io/builtin + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s +``` + +Here, + +- `spec.monitor.agent: prometheus.io/builtin` specifies that we are going to monitor this server using builtin Prometheus scraper. + +Let's create the ZooKeeper crd we have shown above. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/monitoring/builtin-prom-zk.yaml +zookeeper.kubedb.com/zookeeper-builtin-prom created +``` + +Now, wait for the database to go into `Running` state. + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zookeeper-builtin-prom 3.8.3 Ready 129m +``` + +KubeDB will create a separate stats service with name `{ZooKeeper crd name}-stats` for monitoring purpose. 
+ +```bash +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=zookeeper-builtin-prom" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +zookeeper-builtin-prom ClusterIP 10.43.115.171 2181/TCP 129m +zookeeper-builtin-prom-admin-server ClusterIP 10.43.55.7 8080/TCP 129m +zookeeper-builtin-prom-pods ClusterIP None 2181/TCP,2888/TCP,3888/TCP 129m +zookeeper-builtin-prom-stats ClusterIP 10.43.211.84 7000/TCP 129m +``` + +Here, `zookeeper-builtin-prom-stats` service has been created for monitoring purpose. Let's describe the service. + +```bash +$ kubectl describe svc -n demo zookeeper-builtin-prom-stats +Name: zookeeper-builtin-prom-stats +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=zookeeper-builtin-prom + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=zookeepers.kubedb.com + kubedb.com/role=stats +Annotations: monitoring.appscode.com/agent: prometheus.io/builtin + prometheus.io/path: /metrics + prometheus.io/port: 7000 + prometheus.io/scrape: true +Selector: app.kubernetes.io/instance=zookeeper-builtin-prom,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=zookeepers.kubedb.com +Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.43.211.84 +IPs: 10.43.211.84 +Port: metrics 7000/TCP +TargetPort: metrics/TCP +Endpoints: 10.42.0.124:7000,10.42.0.126:7000,10.42.0.128:7000 +Session Affinity: None +Events: +``` + +You can see that the service contains following annotations. + +```bash +prometheus.io/path: /metrics +prometheus.io/port: 7000 +prometheus.io/scrape: true +``` + +The Prometheus server will discover the service endpoint using these specifications and will scrape metrics from the exporter. + +## Configure Prometheus Server + +Now, we have to configure a Prometheus scraping job to scrape the metrics using this service. We are going to configure scraping job similar to this [kubernetes-service-endpoints](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin#kubernetes-service-endpoints) job that scrapes metrics from endpoints of a service. + +Let's configure a Prometheus scraping job to collect metrics from this service. + +```yaml +- job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. + # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. 
+ - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +### Configure Existing Prometheus Server + +If you already have a Prometheus server running, you have to add above scraping job in the `ConfigMap` used to configure the Prometheus server. Then, you have to restart it for the updated configuration to take effect. + +>If you don't use a persistent volume for Prometheus storage, you will lose your previously scraped data on restart. + +### Deploy New Prometheus Server + +If you don't have any existing Prometheus server running, you have to deploy one. In this section, we are going to deploy a Prometheus server in `monitoring` namespace to collect metrics using this stats service. + +**Create ConfigMap:** + +At first, create a ConfigMap with the scraping configuration. Bellow, the YAML of ConfigMap that we are going to create in this tutorial. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config + labels: + app: prometheus-demo + namespace: monitoring +data: + prometheus.yml: |- + global: + scrape_interval: 5s + evaluation_interval: 5s + scrape_configs: + - job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. + # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. 
+ - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +Let's create above `ConfigMap`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/monitoring/prom-config.yaml +configmap/prometheus-config created +``` + +**Create RBAC:** + +If you are using an RBAC enabled cluster, you have to give necessary RBAC permissions for Prometheus. Let's create necessary RBAC stuffs for Prometheus, + +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/rbac.yaml +clusterrole.rbac.authorization.k8s.io/prometheus created +serviceaccount/prometheus created +clusterrolebinding.rbac.authorization.k8s.io/prometheus created +``` + +>YAML for the RBAC resources created above can be found [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/rbac.yaml). + +**Deploy Prometheus:** + +Now, we are ready to deploy Prometheus server. We are going to use following [deployment](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/deployment.yaml) to deploy Prometheus server. + +Let's deploy the Prometheus server. + +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/deployment.yaml +deployment.apps/prometheus created +``` + +### Verify Monitoring Metrics + +Prometheus server is listening to port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. + +At first, let's check if the Prometheus pod is in `Running` state. + +```bash +$ kubectl get pod -n monitoring -l=app=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-d64b668fb-vg746 1/1 Running 0 28s +``` + +Now, run following command on a separate terminal to forward 9090 port of `prometheus-7bd56c6865-8dlpv` pod, + +```bash +$ kubectl port-forward -n monitoring prometheus-d64b668fb-vg746 9090 +Forwarding from 127.0.0.1:9090 -> 9090 +Forwarding from [::1]:9090 -> 9090 +``` + +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see the endpoint of `zookeeper-builtin-prom-stats` service as one of the targets. + +

+  Prometheus Target
+ +Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `ZooKeeper` database `zookeeper-builtin-prom` through stats service `zookeeper-builtin-prom-stats`. + +Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run following commands + +```bash +kubectl delete -n demo zk/zookeeper-builtin-prom + +kubectl delete -n monitoring deployment.apps/prometheus + +kubectl delete -n monitoring clusterrole.rbac.authorization.k8s.io/prometheus +kubectl delete -n monitoring serviceaccount/prometheus +kubectl delete -n monitoring clusterrolebinding.rbac.authorization.k8s.io/prometheus + +kubectl delete ns demo +kubectl delete ns monitoring +``` + +## Next Steps + +- Monitor your ZooKeeper database with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/zookeeper/monitoring/using-prometheus-operator.md). + +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/zookeeper/monitoring/using-prometheus-operator.md b/docs/guides/zookeeper/monitoring/using-prometheus-operator.md new file mode 100644 index 0000000000..4943bea813 --- /dev/null +++ b/docs/guides/zookeeper/monitoring/using-prometheus-operator.md @@ -0,0 +1,357 @@ +--- +title: Monitor ZooKeeper using Prometheus Operator +menu: + docs_{{ .version }}: + identifier: zk-using-prometheus-operator-monitoring + name: Prometheus Operator + parent: zk-monitoring-guides + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring ZooKeeper Using Prometheus operator + +[Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) provides simple and Kubernetes native way to deploy and configure Prometheus server. This tutorial will show you how to use Prometheus operator to monitor ZooKeeper database deployed with KubeDB. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/zookeeper/monitoring/overview.md). + +- We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, you can deploy one using this helm chart [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy the prometheus operator helm chart. We are going to deploy database in `demo` namespace. + + ```bash + $ kubectl create ns monitoring + namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created + ``` + + + +> Note: YAML files used in this tutorial are stored in [docs/examples/ZooKeeper](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/zookeeper) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). 
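+
+If you don't have the Prometheus operator running yet, a minimal installation sketch using the kube-prometheus-stack chart linked above is shown below. The release name `prometheus` and the `monitoring` namespace are assumptions chosen to match the labels and object names used in the rest of this guide:
+
+```bash
+# Add the community Helm repository and install the operator stack
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+
+# The release name becomes the `release: prometheus` label that selects ServiceMonitors later
+helm install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring
+```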
+ +## Find out required labels for ServiceMonitor + +We need to know the labels used to select `ServiceMonitor` by a `Prometheus` crd. We are going to provide these labels in `spec.monitor.prometheus.serviceMonitor.labels` field of ZooKeeper crd so that KubeDB creates `ServiceMonitor` object accordingly. + +At first, let's find out the available Prometheus server in our cluster. + +```bash +$ kubectl get prometheus --all-namespaces +NAMESPACE NAME VERSION DESIRED READY RECONCILED AVAILABLE AGE +monitoring prometheus-kube-prometheus-prometheus v2.54.1 1 1 True True 22h +``` + +> If you don't have any Prometheus server running in your cluster, deploy one following the guide specified in **Before You Begin** section. + +Now, let's view the YAML of the available Prometheus server `prometheus` in `monitoring` namespace. + +```yaml +$ kubectl get prometheus -n monitoring prometheus-kube-prometheus-prometheus -o yaml +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + annotations: + meta.helm.sh/release-name: prometheus + meta.helm.sh/release-namespace: monitoring + creationTimestamp: "2024-11-06T07:39:12Z" + generation: 1 + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 65.1.1 + chart: kube-prometheus-stack-65.1.1 + heritage: Helm + release: prometheus + name: prometheus-kube-prometheus-prometheus + namespace: monitoring + resourceVersion: "91198" + uid: 4f52775a-e0f8-4158-aa3e-6a1d558e0ef9 +spec: + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-kube-prometheus-alertmanager + namespace: monitoring + pathPrefix: / + port: http-web + automountServiceAccountToken: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: http://prometheus-kube-prometheus-prometheus.monitoring:9090 + hostNetwork: false + image: quay.io/prometheus/prometheus:v2.54.1 + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: + matchLabels: + release: prometheus + portName: http-web + probeNamespaceSelector: {} + probeSelector: + matchLabels: + release: prometheus + replicas: 1 + retention: 10d + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: + matchLabels: + release: prometheus + scrapeConfigNamespaceSelector: {} + scrapeConfigSelector: + matchLabels: + release: prometheus + scrapeInterval: 30s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + serviceAccountName: prometheus-kube-prometheus-prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: + matchLabels: + release: prometheus + shards: 1 + tsdb: + outOfOrderTimeWindow: 0s + version: v2.54.1 + walCompression: true +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-11-07T05:26:29Z" + message: "" + observedGeneration: 1 + reason: "" + status: "True" + type: Available + - lastTransitionTime: "2024-11-07T05:26:29Z" + message: "" + observedGeneration: 1 + reason: "" + status: "True" + type: Reconciled + paused: false + replicas: 1 + selector: app.kubernetes.io/instance=prometheus-kube-prometheus-prometheus,app.kubernetes.io/managed-by=prometheus-operator,app.kubernetes.io/name=prometheus,operator.prometheus.io/name=prometheus-kube-prometheus-prometheus,prometheus=prometheus-kube-prometheus-prometheus + shardStatuses: + - availableReplicas: 1 + replicas: 1 + 
shardID: "0" + unavailableReplicas: 0 + updatedReplicas: 1 + shards: 1 + unavailableReplicas: 0 + updatedReplicas: 1 +``` + +Notice the `spec.serviceMonitorSelector` section. Here, `release: prometheus` label is used to select `ServiceMonitor` crd. So, we are going to use this label in `spec.monitor.prometheus.serviceMonitor.labels` field of ZooKeeper crd. + +## Deploy ZooKeeper with Monitoring Enabled + +At first, let's deploy an ZooKeeper database with monitoring enabled. Below is the ZooKeeper object that we are going to create. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zookeeper + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: longhorn + accessModes: + - ReadWriteOnce + deletionPolicy: WipeOut + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s +``` + +Here, + +- `monitor.agent: prometheus.io/operator` indicates that we are going to monitor this server using Prometheus operator. +- `monitor.prometheus.serviceMonitor.labels` specifies that KubeDB should create `ServiceMonitor` with these labels. +- `monitor.prometheus.interval` indicates that the Prometheus server should scrape metrics from this database with 10 seconds interval. + +Let's create the ZooKeeper object that we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/monitoring/prom-zk.yaml +zookeeper.kubedb.com/zookeeper created +``` + +Now, wait for the database to go into `Running` state. + +```bash +$ kubectl get zk -n demo zookeeper +NAME VERSION STATUS AGE +zookeeper 3.8.3 Ready 34s +``` + +KubeDB will create a separate stats service with name `{ZooKeeper crd name}-stats` for monitoring purpose. + +```bash +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=zookeeper" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +zookeeper ClusterIP 10.43.121.151 2181/TCP 26s +zookeeper-admin-server ClusterIP 10.43.28.44 8080/TCP 26s +zookeeper-pods ClusterIP None 2181/TCP,2888/TCP,3888/TCP 26s +zookeeper-stats ClusterIP 10.43.19.32 7000/TCP 26s +``` + +Here, `zookeeper-stats` service has been created for monitoring purpose. + +Let's describe this stats service. + +```yaml +$ kubectl describe svc -n demo zookeeper-stats +Name: zookeeper-stats +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=zookeeper + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=zookeepers.kubedb.com + kubedb.com/role=stats +Annotations: monitoring.appscode.com/agent: prometheus.io/operator +Selector: app.kubernetes.io/instance=zookeeper,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=zookeepers.kubedb.com +Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.43.19.32 +IPs: 10.43.19.32 +Port: metrics 7000/TCP +TargetPort: metrics/TCP +Endpoints: 10.42.0.100:7000,10.42.0.96:7000,10.42.0.98:7000 +Session Affinity: None +Events: +``` + +Notice the `Labels` and `Port` fields. `ServiceMonitor` will use this information to target its endpoints. + +KubeDB will also create a `ServiceMonitor` crd in `demo` namespace that select the endpoints of `zookeeper-stats` service. Verify that the `ServiceMonitor` crd has been created. 
+ +```bash +$ kubectl get servicemonitor -n demo +NAME AGE +zookeeper-stats 2m40s +``` + +Let's verify that the `ServiceMonitor` has the label that we had specified in `spec.monitor` section of ZooKeeper crd. + +```yaml +$ kubectl get servicemonitor -n demo zookeeper-stats -o yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + creationTimestamp: "2024-11-07T07:20:08Z" + generation: 1 + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: zookeeper + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: zookeepers.kubedb.com + release: prometheus + name: zookeeper-stats + namespace: demo + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: zookeeper-stats + uid: 5865230f-7e68-452c-90ae-c760fbd694d0 + resourceVersion: "94745" + uid: 404eb867-f01a-4a9a-9646-9008159d5408 +spec: + endpoints: + - honorLabels: true + interval: 10s + path: /metrics + port: metrics + namespaceSelector: + matchNames: + - demo + selector: + matchLabels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: zookeeper + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: zookeepers.kubedb.com + kubedb.com/role: stats +``` + +Notice that the `ServiceMonitor` has label `release: prometheus` that we had specified in ZooKeeper crd. + +Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `zookeeper-stats` service. It also, target the `metrics` port that we have seen in the stats service. + +## Verify Monitoring Metrics + +At first, let's find out the respective Prometheus pod for `prometheus` Prometheus server. + +```bash +$ kubectl get pod -n monitoring -l=app.kubernetes.io/name=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 1 22h +``` + +Prometheus server is listening to port `9090` of `prometheus-prometheus-kube-prometheus-prometheus-0` pod. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. + +Run following command on a separate terminal to forward the port 9090 of `prometheus-prometheus-kube-prometheus-prometheus-0` pod, + +```bash +$ kubectl port-forward -n monitoring prometheus-prometheus-kube-prometheus-prometheus-0 9090 +Forwarding from 127.0.0.1:9090 -> 9090 +Forwarding from [::1]:9090 -> 9090 +``` + +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see `metrics` endpoint of `zookeeper-stats` service as one of the targets. + +

+  Prometheus Target
+ +Check the `endpoint` and `service` labels marked by the red rectangles. It verifies that the target is our expected database. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create a beautiful dashboard with collected metrics. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run following commands + +```bash +kubectl delete -n demo zk/zookeeper +kubectl delete ns demo +``` + +## Next Steps + +- Monitor your ZooKeeper database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/zookeeper/monitoring/using-builtin-prometheus.md). +- Detail concepts of [ZooKeeper object](/docs/guides/zookeeper/concepts/zookeeper.md). +- Detail concepts of [ZooKeeperVersion object](/docs/guides/zookeeper/concepts/catalog.md). + +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/zookeeper/reconfigure-tls/_index.md b/docs/guides/zookeeper/reconfigure-tls/_index.md new file mode 100644 index 0000000000..5d3b1e62d4 --- /dev/null +++ b/docs/guides/zookeeper/reconfigure-tls/_index.md @@ -0,0 +1,10 @@ +--- +title: Reconfigure TLS/SSL +menu: + docs_{{ .version }}: + identifier: zk-reconfigure-tls + name: Reconfigure TLS/SSL + parent: zk-zookeeper-guides + weight: 46 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/zookeeper/reconfigure-tls/overview.md b/docs/guides/zookeeper/reconfigure-tls/overview.md new file mode 100644 index 0000000000..eefb9fc393 --- /dev/null +++ b/docs/guides/zookeeper/reconfigure-tls/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring TLS/SSL +menu: + docs_{{ .version }}: + identifier: zk-reconfigure-tls-overview + name: Overview + parent: zk-reconfigure-tls + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring TLS of ZooKeeper + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures TLS configuration i.e. add TLS, remove TLS, update Issuer/Cluster Issuer or Certificates and rotate the Certificates of `ZooKeeper`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + +## How Reconfiguring ZooKeeper TLS Configuration Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures TLS of a `ZooKeeper`. Open the image in a new tab to see the enlarged version. + +
+  Fig: Reconfiguring TLS process of ZooKeeper
+ +The Reconfiguring ZooKeeper TLS process consists of the following steps: + +1. At first, a user creates a `ZooKeeper` Custom Resource Object (CRO). + +2. `KubeDB` Provisioner operator watches the `ZooKeeper` CRO. + +3. When the operator finds a `ZooKeeper` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure the TLS configuration of the `ZooKeeper` database the user creates a `ZooKeeperOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ZooKeeperOpsRequest` CR. + +6. When it finds a `ZooKeeperOpsRequest` CR, it pauses the `ZooKeeper` object which is referred from the `ZooKeeperOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `ZooKeeper` object during the reconfiguring TLS process. + +7. Then the `KubeDB` Ops-manager operator will add, remove, update or rotate TLS configuration based on the Ops Request yaml. + +8. Then the `KubeDB` Ops-manager operator will restart all the Pods of the database so that they restart with the new TLS configuration defined in the `ZooKeeperOpsRequest` CR. + +9. After the successful reconfiguring of the `ZooKeeper` TLS, the `KubeDB` Ops-manager operator resumes the `ZooKeeper` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the [next](/docs/guides/zookeeper/reconfigure-tls/reconfigure-tls.md) docs, we are going to show a step by step guide on reconfiguring TLS configuration of a ZooKeeper database using `ZooKeeperOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/zookeeper/reconfigure-tls/reconfigure-tls.md b/docs/guides/zookeeper/reconfigure-tls/reconfigure-tls.md new file mode 100644 index 0000000000..91351d3636 --- /dev/null +++ b/docs/guides/zookeeper/reconfigure-tls/reconfigure-tls.md @@ -0,0 +1,1014 @@ +--- +title: Reconfigure ZooKeeper TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: zk-reconfigure-tls-zookeeper + name: Reconfigure ZooKeeper TLS/SSL Encryption + parent: zk-reconfigure-tls + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure ZooKeeper TLS/SSL (Transport Encryption) + +KubeDB supports reconfigure i.e. add, remove, update and rotation of TLS/SSL certificates for existing ZooKeeper database via a ZooKeeperOpsRequest. This tutorial will show you how to use KubeDB to reconfigure TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/zookeeper](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/zookeeper) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). 
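+
+Since cert-manager issues and renews every certificate used in this guide, it is worth confirming that it is healthy before creating any `Issuer`. A minimal check, assuming cert-manager was installed into its default `cert-manager` namespace:
+
+```bash
+# Controllers should be Running before any Issuer/Certificate is created
+kubectl get pods -n cert-manager
+
+# The CRDs this guide relies on should be registered
+kubectl get crd issuers.cert-manager.io certificates.cert-manager.io
+```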
+ +## Add TLS to a ZooKeeper database + +Here, We are going to create a ZooKeeper without TLS and then reconfigure the database to use TLS. + +### Deploy ZooKeeper without TLS + +In this section, we are going to deploy a ZooKeeper ensemble without TLS. In the next few sections we will reconfigure TLS using `ZooKeeperOpsRequest` CRD. Below is the YAML of the `ZooKeeper` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" + +``` + +Let's create the `ZooKeeper` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zookeeper.yaml +zookeeper.kubedb.com/zk-quickstart created +``` + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ watch kubectl get zookeeper -n demo +NAME TYPE VERSION STATUS AGE +zk-quickstart kubedb.com/v1alpha2 3.8.3 Ready 60s +``` + +Now, we can exec one zookeeper broker pod and verify configuration that the TLS is disabled. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ cat ../conf/zoo.cfg +4lw.commands.whitelist=* +dataDir=/data +tickTime=2000 +initLimit=10 +syncLimit=2 +clientPort=2181 +globalOutstandingLimit=1000 +preAllocSize=65536 +snapCount=10000 +commitLogCount=500 +snapSizeLimitInKb=4194304 +maxCnxns=0 +maxClientCnxns=60 +minSessionTimeout=4000 +maxSessionTimeout=40000 +autopurge.snapRetainCount=3 +autopurge.purgeInterval=1 +quorumListenOnAllIPs=false +admin.serverPort=8080 +authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider +reconfigEnabled=true +standaloneEnabled=false +dynamicConfigFile=/data/zoo.cfg.dynamic +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ +``` + +We can verify from the above output that TLS is disabled for this Ensemble. + +### Create Issuer/ ClusterIssuer + +Now, We are going to create an example `Issuer` that will be used to enable SSL/TLS in ZooKeeper. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating a ca certificates using openssl. + +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca/O=kubedb" +Generating a RSA private key +................+++++ +........................+++++ +writing new private key to './ca.key' +----- +``` + +- Now we are going to create a ca-secret using the certificate files that we have just generated. + +```bash +$ kubectl create secret tls zookeeper-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +secret/zookeeper-ca created +``` + +Now, Let's create an `Issuer` using the `zookeeper-ca` secret that we have just created. 
The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zk-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-ca +``` + +Let's apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zookeeper-issuer.yaml +issuer.cert-manager.io/zk-issuer created +``` + +### Create ZooKeeperOpsRequest + +In order to add TLS to the zookeeper, we have to create a `ZooKeeperOpsRequest` CRO with our created issuer. Below is the YAML of the `ZooKeeperOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + issuerRef: + name: zookeeper-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + subject: + organizations: + - zookeeper + organizationalUnits: + - client + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `zk-quickstart` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on zookeeper. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/zookeeper/concepts/zookeeper.md#spectls). + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zookeeper-add-tls.yaml +zookeeperopsrequest.ops.kubedb.com/zkops-add-tls created +``` + +#### Verify TLS Enabled Successfully + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CRO, + +```bash +$ kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zkops-add-tls ReconfigureTLS Successful 4m36s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed. 
+ +```bash +$ kubectl describe zookeeperopsrequest -n demo zkops-add-tls +Name: zkops-add-tls +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T05:46:18Z + Generation: 1 + Resource Version: 2118117 + UID: aa25e2b8-2583-4757-b3f7-b053fc21819f +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Tls: + Issuer Ref: + API Group: cert-manager.io + Kind: Issuer + Name: zookeeper-ca-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T05:46:18Z + Message: ZooKeeper ops-request has started to reconfigure tls for zookeeper nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T05:46:31Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T05:46:26Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T05:46:26Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T05:46:26Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T05:46:36Z + Message: successfully reconciled the ZooKeeper with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T05:48:56Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T05:46:41Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T05:46:41Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T05:46:46Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T05:47:26Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T05:47:26Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T05:48:16Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T05:48:16Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T05:48:56Z + Message: Successfully completed reconfigureTLS for zookeeper. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + +Now, Let's exec into a zookeeper ensemble pod and verify the configuration that the TLS is enabled. 
+ +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ cat ../conf/zoo.cfg +4lw.commands.whitelist=* +dataDir=/data +tickTime=2000 +initLimit=10 +syncLimit=2 +clientPort=2181 +globalOutstandingLimit=1000 +preAllocSize=65536 +snapCount=10000 +commitLogCount=500 +snapSizeLimitInKb=4194304 +maxCnxns=0 +maxClientCnxns=60 +minSessionTimeout=4000 +maxSessionTimeout=40000 +autopurge.snapRetainCount=3 +autopurge.purgeInterval=1 +quorumListenOnAllIPs=false +admin.serverPort=8080 +authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider +reconfigEnabled=true +standaloneEnabled=false +dynamicConfigFile=/data/zoo.cfg.dynamic +secureClientPort=2182 +serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory +authProvider.x509=org.apache.zookeeper.server.auth.X509AuthenticationProvider +ssl.keyStore.location=/var/private/ssl/server.keystore.jks +ssl.keyStore.password=fdjk2dgffqn9 +ssl.trustStore.location=/var/private/ssl/server.truststore.jks +ssl.trustStore.password=fdjk2dgffqn9 +sslQuorum=true +ssl.quorum.keyStore.location=/var/private/ssl/server.keystore.jks +ssl.quorum.keyStore.password=fdjk2dgffqn9 +ssl.quorum.trustStore.location=/var/private/ssl/server.truststore.jks +ssl.quorum.trustStore.password=fdjk2dgffqn9 +ssl.quorum.hostnameVerification=false +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ +``` + +We can see from the above output that, keystore location is `/var/private/ssl/server.keystore.jks` which means that TLS is enabled. + +## Rotate Certificate + +Now we are going to rotate the certificate of this cluster. First let's check the current expiration date of the certificate. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ openssl x509 -in /var/private/ssl/tls.crt -inform PEM -enddate -nameopt RFC2253 -noout +notAfter=Feb 2 12:53:30 2025 GMT +``` + +So, the certificate will expire on this time `Feb 2 12:53:30 2025 GMT`. + +### Create ZooKeeperOpsRequest + +Now we are going to increase it using a ZooKeeperOpsRequest. Below is the yaml of the ops request that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + rotateCertificates: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `zk-quickstart`. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our cluster. +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this zookeeper cluster. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zkops-rotate.yaml +zookeeperopsrequest.ops.kubedb.com/zkops-rotate created +``` + +#### Verify Certificate Rotated Successfully + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CRO, + +```bash +$ kubectl get zookeeperopsrequests -n demo zkops-rotate +NAME TYPE STATUS AGE +zkops-rotate ReconfigureTLS Successful 4m4s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. 
If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe zookeeperopsrequest -n demo zkops-rotate +Name: zkops-rotate +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T13:10:03Z + Generation: 1 + Resource Version: 2153555 + UID: a1886cd3-784b-4523-936c-a510327d6129 +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Tls: + Rotate Certificates: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T13:10:03Z + Message: ZooKeeper ops-request has started to reconfigure tls for zookeeper nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T13:10:16Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T13:10:11Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T13:10:11Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T13:10:11Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T13:10:22Z + Message: successfully reconciled the ZooKeeper with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T13:12:42Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T13:10:27Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:10:27Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:10:32Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T13:11:07Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:11:07Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:11:52Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:11:52Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:12:42Z + Message: Successfully completed reconfigureTLS for zookeeper. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m57s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/zkops-rotate + Normal Starting 2m57s KubeDB Ops-manager Operator Pausing ZooKeeper database: demo/zk-quickstart + Normal Successful 2m57s KubeDB Ops-manager Operator Successfully paused ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zkops-rotate + Warning get certificate; ConditionStatus:True 2m49s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m49s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m49s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 2m49s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m49s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m49s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 2m49s KubeDB Ops-manager Operator Successfully synced all certificates + Warning get certificate; ConditionStatus:True 2m44s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m44s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m44s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 2m44s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m44s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m44s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 2m44s KubeDB Ops-manager Operator Successfully synced all certificates + Normal UpdatePetSets 2m38s KubeDB Ops-manager Operator successfully reconciled the ZooKeeper with tls configuration + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-0 2m33s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-0 2m33s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning running pod; ConditionStatus:False 2m28s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-1 113s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-1 113s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-2 68s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-2 68s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Normal RestartNodes 18s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 18s KubeDB Ops-manager 
Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 18s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zkops-rotate +``` + +Now, let's check the expiration date of the certificate. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ openssl x509 -in /var/private/ssl/tls.crt -inform PEM -enddate -nameopt RFC2253 -noout +notAfter=Feb 2 13:12:42 2025 GMT +``` + +As we can see from the above output, the certificate has been rotated successfully. + +## Change Issuer/ClusterIssuer + +Now, we are going to change the issuer of this database. + +- Let's create a new ca certificate and key using a different subject `CN=ca-updated,O=kubedb-updated`. + +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca-updated/O=kubedb-updated" +Generating a RSA private key +..............................................................+++++ +......................................................................................+++++ +writing new private key to './ca.key' +----- +``` + +- Now we are going to create a new ca-secret using the certificate files that we have just generated. + +```bash +$ kubectl create secret tls zookeeper-new-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +secret/zookeeper-new-ca created +``` + +Now, let's create a new `Issuer` using the `zookeeper-new-ca` secret that we have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zk-new-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-new-ca +``` + +Let's apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zookeeper-new-issuer.yaml +issuer.cert-manager.io/zk-new-issuer created +``` + +### Create ZooKeeperOpsRequest + +In order to use the new issuer to issue new certificates, we have to create a `ZooKeeperOpsRequest` CRO with the newly created issuer. Below is the YAML of the `ZooKeeperOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + issuerRef: + name: zk-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `zk-quickstart` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our zookeeper. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zookeeper-update-tls-issuer.yaml +zookeeperopsrequest.ops.kubedb.com/zkops-update-issuer created +``` + +#### Verify Issuer is changed successfully + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`.
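+
+If you prefer to block until the request finishes instead of polling by hand, `kubectl wait` can watch the `Successful` condition reported in the status. This is only a convenience sketch; the ten-minute timeout is an assumption, so adjust it to your environment.
+
+```bash
+# Block until the ops-request reports the Successful condition (or the timeout expires).
+$ kubectl wait zookeeperopsrequest/zkops-update-issuer -n demo \
+    --for=condition=Successful --timeout=10m
+```
+
+Otherwise, watching the resource works just as well.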
Run the following command to watch `ZooKeeperOpsRequest` CRO, + +```bash +$ kubectl get zookeeperopsrequests -n demo zkops-update-issuer +NAME TYPE STATUS AGE +zkops-update-issuer ReconfigureTLS Successful 8m6s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe zookeeperopsrequest -n demo zkops-update-issuer +Name: zkops-update-issuer +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T13:27:25Z + Generation: 1 + Resource Version: 2155331 + UID: 399cae54-a6ab-4848-93ff-5dba09a128d7 +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Tls: + Issuer Ref: + API Group: cert-manager.io + Kind: Issuer + Name: zk-new-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T13:27:25Z + Message: ZooKeeper ops-request has started to reconfigure tls for zookeeper nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T13:27:35Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T13:27:30Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T13:27:30Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T13:27:30Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T13:27:40Z + Message: successfully reconciled the ZooKeeper with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T13:30:00Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T13:27:45Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:27:45Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:27:50Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T13:28:30Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:28:30Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:29:20Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:29:20Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:30:00Z + Message: Successfully completed 
reconfigureTLS for zookeeper. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m53s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/zkops-update-issuer + Warning get certificate; ConditionStatus:True 2m48s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m48s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m48s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 2m48s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m48s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m48s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 2m48s KubeDB Ops-manager Operator Successfully synced all certificates + Warning get certificate; ConditionStatus:True 2m43s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m43s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m43s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 2m43s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 2m43s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 2m43s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 2m43s KubeDB Ops-manager Operator Successfully synced all certificates + Normal UpdatePetSets 2m38s KubeDB Ops-manager Operator successfully reconciled the ZooKeeper with tls configuration + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-0 2m33s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-0 2m33s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning running pod; ConditionStatus:False 2m28s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-1 108s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-1 108s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-2 58s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-2 58s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Normal RestartNodes 18s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 18s KubeDB Ops-manager Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 18s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zkops-update-issuer +``` 
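+
+Before exec-ing into a node, we can also confirm from the cert-manager side that the re-issued certificates are ready and now reference the new issuer. This is an optional check; the exact `Certificate` names depend on how the operator generated them in your cluster.
+
+```bash
+# List the cert-manager Certificate objects in the demo namespace and
+# print which issuer each one currently references.
+$ kubectl get certificates -n demo \
+    -o custom-columns=NAME:.metadata.name,ISSUER:.spec.issuerRef.name
+```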
+ +Now, let's exec into a zookeeper node and check the certificate issuer subject to see whether it matches the new CA we have provided. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ keytool -list -v -keystore /var/private/ssl/server.keystore.jks -storepass fdjk2dgffqn9 | grep 'Issuer' +Issuer: O=kubedb-updated, CN=ca-updated +Issuer: O=kubedb-updated, CN=ca-updated +``` + +We can see from the above output that the issuer subject matches the subject of the new CA certificate that we created. So, the issuer has been changed successfully. + +## Remove TLS from the Database + +Now, we are going to remove TLS from this database using a ZooKeeperOpsRequest. + +### Create ZooKeeperOpsRequest + +Below is the YAML of the `ZooKeeperOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zkops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: zk-quickstart + tls: + remove: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `zk-quickstart` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on ZooKeeper. +- `spec.tls.remove` specifies that we want to remove TLS from this cluster. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfigure-tls/zkops-remove.yaml +zookeeperopsrequest.ops.kubedb.com/zkops-remove created +``` + +#### Verify TLS Removed Successfully + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CRO, + +```bash +$ kubectl get zookeeperopsrequest -n demo zkops-remove +NAME TYPE STATUS AGE +zkops-remove ReconfigureTLS Successful 105s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed.
+ +```bash +$ kubectl describe zookeeperopsrequest -n demo zkops-remove +Name: zkops-remove +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T13:39:19Z + Generation: 1 + Resource Version: 2156556 + UID: 8f669fe1-169f-4446-9d12-bf959216e2e0 +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Tls: + Remove: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T13:39:19Z + Message: ZooKeeper ops-request has started to reconfigure tls for zookeeper nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T13:39:27Z + Message: successfully reconciled the ZooKeeper with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T13:41:42Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T13:39:32Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:39:32Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-11-04T13:39:37Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T13:40:22Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:40:22Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-11-04T13:41:02Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:41:02Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-11-04T13:41:42Z + Message: Successfully completed reconfigureTLS for zookeeper. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m26s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/zkops-remove + Normal Starting 2m26s KubeDB Ops-manager Operator Pausing ZooKeeper database: demo/zk-quickstart + Normal Successful 2m26s KubeDB Ops-manager Operator Successfully paused ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zkops-remove + Normal UpdatePetSets 2m18s KubeDB Ops-manager Operator successfully reconciled the ZooKeeper with tls configuration + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-0 2m13s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-0 2m13s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning running pod; ConditionStatus:False 2m8s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-1 83s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-1 83s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-2 43s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-2 43s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Normal RestartNodes 3s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 3s KubeDB Ops-manager Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 3s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zkops-remove +``` + +Now, let's exec into one of the zookeeper nodes and check whether TLS is disabled or not. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ cat ../conf/zoo.cfg +4lw.commands.whitelist=* +dataDir=/data +tickTime=2000 +initLimit=10 +syncLimit=2 +clientPort=2181 +globalOutstandingLimit=1000 +preAllocSize=65536 +snapCount=10000 +commitLogCount=500 +snapSizeLimitInKb=4194304 +maxCnxns=0 +maxClientCnxns=60 +minSessionTimeout=4000 +maxSessionTimeout=40000 +autopurge.snapRetainCount=3 +autopurge.purgeInterval=1 +quorumListenOnAllIPs=false +admin.serverPort=8080 +authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider +reconfigEnabled=true +standaloneEnabled=false +dynamicConfigFile=/data/zoo.cfg.dynamic +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ +``` + +So, we can see from the above output that TLS has been disabled successfully. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zookeeperopsrequest -n demo zkops-add-tls zkops-remove zkops-rotate zkops-update-issuer +kubectl delete zookeeper -n demo zk-quickstart +kubectl delete issuer -n demo zk-issuer zk-new-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [ZooKeeper object](/docs/guides/zookeeper/concepts/zookeeper.md). +- Want to hack on KubeDB?
Check our [contribution guidelines](/docs/CONTRIBUTING.md). + diff --git a/docs/guides/zookeeper/reconfigure/_index.md b/docs/guides/zookeeper/reconfigure/_index.md new file mode 100644 index 0000000000..1fcfbf6768 --- /dev/null +++ b/docs/guides/zookeeper/reconfigure/_index.md @@ -0,0 +1,10 @@ +--- +title: Reconfigure +menu: + docs_{{ .version }}: + identifier: zk-reconfigure + name: Reconfigure + parent: zk-zookeeper-guides + weight: 80 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/zookeeper/reconfigure/overview.md b/docs/guides/zookeeper/reconfigure/overview.md new file mode 100644 index 0000000000..36f508cc1f --- /dev/null +++ b/docs/guides/zookeeper/reconfigure/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring ZooKeeper +menu: + docs_{{ .version }}: + identifier: zk-reconfigure-overview + name: Overview + parent: zk-reconfigure + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring ZooKeeper + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures `ZooKeeper` cluster. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + +## How does Reconfiguring ZooKeeper Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures `ZooKeeper` database components. Open the image in a new tab to see the enlarged version. + +
+  Fig: Reconfiguring process of ZooKeeper
+ +The Reconfiguring ZooKeeper process consists of the following steps: + +1. At first, a user creates a `ZooKeeper` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `ZooKeeper` CR. + +3. When the operator finds a `ZooKeeper` CR, it creates required number of `Petsets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure the `ZooKeeper` database the user creates a `ZooKeeperOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ZooKeeperOpsRequest` CR. + +6. When it finds a `ZooKeeperOpsRequest` CR, it halts the `ZooKeeper` object which is referred from the `ZooKeeperOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `ZooKeeper` object during the reconfiguring process. + +7. Then the `KubeDB` Ops-manager operator will replace the existing configuration with the new configuration provided or merge the new configuration with the existing configuration according to the `ZooKeeperOpsRequest` CR. + +8. Then the `KubeDB` Ops-manager operator will restart the related Petset Pods so that they restart with the new configuration defined in the `ZooKeeperOpsRequest` CR. + +9. After the successful reconfiguring of the `ZooKeeper` components, the `KubeDB` Ops-manager operator resumes the `ZooKeeper` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the [next](/docs/guides/zookeeper/reconfigure/reconfigure.md) docs, we are going to show a step by step guide on reconfiguring ZooKeeper database components using `ZooKeeperOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/zookeeper/reconfigure/reconfigure.md b/docs/guides/zookeeper/reconfigure/reconfigure.md new file mode 100644 index 0000000000..c108938793 --- /dev/null +++ b/docs/guides/zookeeper/reconfigure/reconfigure.md @@ -0,0 +1,529 @@ +--- +title: Reconfigure ZooKeeper Ensemble +menu: + docs_{{ .version }}: + identifier: zk-ensemble-reconfigure + name: Reconfigure Configurations + parent: zk-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure ZooKeeper Ensemble + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a ZooKeeper ensemble. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/zookeeper/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [examples](/docs/examples/zookeeper) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `ZooKeeper` cluster using a supported version by `KubeDB` operator. Then we are going to apply `ZooKeeperOpsRequest` to reconfigure its configuration. + +### Prepare ZooKeeper Ensemble + +Now, we are going to deploy a `ZooKeeper` cluster with version `3.8.3`. 
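+
+Before preparing the custom configuration, you can optionally confirm that this version is available in your installation by listing the version catalog. This assumes the KubeDB catalog CRDs are installed, which is the case for a standard KubeDB setup.
+
+```bash
+# List the ZooKeeper versions known to the installed KubeDB catalog.
+$ kubectl get zookeeperversions
+```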
+ +### Deploy ZooKeeper Ensemble + +At first, we will create `secret` named zk-configuration containing required configuration settings. + +```yaml +apiVersion: v1 +stringData: + zoo.cfg: | + maxClientCnxns=70 +kind: Secret +metadata: + name: zk-configuration + namespace: demo +``` +Here, `maxClientCnxns` is set to `70`, whereas the default value is `60`. + +Now, we will apply the secret with custom configuration. +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfiguration/secret.yaml +secret/zk-configuration created +``` + +In this section, we are going to create a ZooKeeper object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `ZooKeeper` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + configSecret: + name: zk-configuration + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" +``` + +Let's create the `ZooKeeper` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml +zookeeper.kubedb.com/zk-quickstart created +``` + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 23s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +Now, you can exec into the zookeeper pod and find if the custom configuration is there, + +```bash +$ Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ echo conf | nc localhost 2181 +clientPort=2181 +secureClientPort=-1 +dataDir=/data/version-2 +dataDirSize=134218330 +dataLogDir=/data/version-2 +dataLogSize=134218330 +tickTime=2000 +maxClientCnxns=70 +minSessionTimeout=4000 +maxSessionTimeout=40000 +clientPortListenBacklog=-1 +serverId=1 +initLimit=10 +syncLimit=2 +electionAlg=3 +electionPort=3888 +quorumPort=2888 +peerType=0 +membership: +server.1=zk-quickstart-0.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.2=zk-quickstart-1.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.3=zk-quickstart-2.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +version=100000011zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ exit +exit +``` + +As we can see from the configuration of running zookeeper, the value of `maxClientCnxns` has been set to `70`. + +### Reconfigure using new secret + +Now we will reconfigure this database to set `maxClientCnxns` to `100`. + +At first, we will create `secret` named new-configuration containing required configuration settings. + +```yaml +apiVersion: v1 +stringData: + zoo.cfg: | + maxClientCnxns=100 +kind: Secret +metadata: + name: zk-new-configuration + namespace: demo +``` +Here, `maxClientCnxns` is set to `100`. + +Now, we will apply the secret with custom configuration. 
+```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfiguration/new-secret.yaml +secret/zk-new-configuration created +``` + +#### Create ZooKeeperOpsRequest + +Now, we will use this secret to replace the previous secret using a `ZooKeeperOpsRequest` CR. The `ZooKeeperOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-reconfig + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: zk-quickstart + configuration: + configSecret: + name: zk-new-configuration +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `zk-quickstart` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configSecret.name` specifies the name of the new secret. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml +zookeeperopsrequest.ops.kubedb.com/zk-reconfig created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `ZooKeeper` object. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ watch kubectl get zookeeperopsrequest -n demo +Every 2.0s: kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zk-reconfig Reconfigure Successful 1m +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
+ +```bash +$ kubectl describe zookeeperopsrequest -n demo zk-reconfig +Name: zk-reconfig +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-30T08:27:00Z + Generation: 1 + Resource Version: 1548116 + UID: 4f3daa11-c41b-4079-a8d8-1040931284ef +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: zk-new-configuration + Database Ref: + Name: zk-quickstart + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-10-30T08:27:00Z + Message: ZooKeeper ops-request has started to reconfigure ZooKeeper nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-10-30T08:27:08Z + Message: successfully reconciled the ZooKeeper with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-30T08:29:18Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-10-30T08:27:13Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-10-30T08:27:13Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-10-30T08:27:18Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-10-30T08:27:58Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-10-30T08:27:58Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-10-30T08:28:38Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-10-30T08:28:38Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-10-30T08:29:18Z + Message: Successfully completed reconfigure ZooKeeper + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + +Now need to check the new configuration we have provided. + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 20s +``` + +Now let’s exec into the zookeeper pod and check the new configuration we have provided. 
+ +```bash +$ Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ echo conf | nc localhost 2181 +clientPort=2181 +secureClientPort=-1 +dataDir=/data/version-2 +dataDirSize=134218330 +dataLogDir=/data/version-2 +dataLogSize=134218330 +tickTime=2000 +maxClientCnxns=100 +minSessionTimeout=4000 +maxSessionTimeout=40000 +clientPortListenBacklog=-1 +serverId=1 +initLimit=10 +syncLimit=2 +electionAlg=3 +electionPort=3888 +quorumPort=2888 +peerType=0 +membership: +server.1=zk-quickstart-0.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.2=zk-quickstart-1.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.3=zk-quickstart-2.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +version=100000011zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ exit +exit +``` + +As we can see from the configuration of running zookeeper, the value of `maxClientCnxns` has been changed from `70` to `100`. So the reconfiguration of the zookeeper is successful. + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `maxClientCnxns` to `90`. This time we won't use a new secret. We will use the `applyConfig` field of the `ZooKeeperOpsRequest`. This will merge the new config in the existing secret. + +#### Create ZooKeeperOpsRequest + +Now, we will use the new configuration in the `data` field in the `ZooKeeperOpsRequest` CR. The `ZooKeeperOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-reconfig-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: zk-quickstart + configuration: + applyConfig: + zoo.cfg: | + maxClientCnxns=90 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `zk-quickstart` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/reconfiguration/zkops-apply-reconfiguration.yaml +zookeeperopsrequest.ops.kubedb.com/zk-reconfig-apply created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ watch kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zk-reconfig-apply Reconfigure Successful 38s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
+ +```bash +$ kubectl describe zookeeperopsrequest -n demo zk-reconfig-apply +Name: zk-reconfig-apply +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-30T08:27:00Z + Generation: 1 + Resource Version: 1548116 + UID: 4f3daa11-c41b-4079-a8d8-1040931284ef +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: zk-new-configuration + Database Ref: + Name: zk-quickstart + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-10-30T08:27:00Z + Message: ZooKeeper ops-request has started to reconfigure ZooKeeper nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-10-30T08:27:08Z + Message: successfully reconciled the ZooKeeper with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-30T08:29:18Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-10-30T08:27:13Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-10-30T08:27:13Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-10-30T08:27:18Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-10-30T08:27:58Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-10-30T08:27:58Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-10-30T08:28:38Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-10-30T08:28:38Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Last Transition Time: 2024-10-30T08:29:18Z + Message: Successfully completed reconfigure ZooKeeper + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + +Now need to check the new configuration we have provided. + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 20s +``` + +Now let’s exec into the zookeeper pod and check the new configuration we have provided. 
+ +```bash +$ Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ echo conf | nc localhost 2181 +clientPort=2181 +secureClientPort=-1 +dataDir=/data/version-2 +dataDirSize=134218330 +dataLogDir=/data/version-2 +dataLogSize=134218330 +tickTime=2000 +maxClientCnxns=90 +minSessionTimeout=4000 +maxSessionTimeout=40000 +clientPortListenBacklog=-1 +serverId=1 +initLimit=10 +syncLimit=2 +electionAlg=3 +electionPort=3888 +quorumPort=2888 +peerType=0 +membership: +server.1=zk-quickstart-0.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.2=zk-quickstart-1.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +server.3=zk-quickstart-2.zk-quickstart-pods.demo.svc.cluster.local:2888:3888:participant;0.0.0.0:2181 +version=100000011zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ exit +exit +``` + +As we can see from the configuration of running zookeeper, the value of `maxClientCnxns` has been changed from `100` to `90`. So, the reconfiguration of the database using the `applyConfig` field is successful. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zk -n demo zk-quickstart +kubectl delete zookeeperopsrequest -n demo zk-reconfig zk-reconfig-apply +``` \ No newline at end of file diff --git a/docs/guides/zookeeper/restart/restart.md b/docs/guides/zookeeper/restart/restart.md index 387b837774..583f8b0de2 100644 --- a/docs/guides/zookeeper/restart/restart.md +++ b/docs/guides/zookeeper/restart/restart.md @@ -201,6 +201,5 @@ kubectl delete ns demo ## Next Steps -- Detail concepts of [ZooKeeper object](/docs/guides/zookeeper/concepts/zookeeper.md). - Detail concepts of [ZooKeeper object](/docs/guides/zookeeper/concepts/zookeeper.md). - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/zookeeper/scaling/_index.md b/docs/guides/zookeeper/scaling/_index.md new file mode 100644 index 0000000000..4e80b1d73f --- /dev/null +++ b/docs/guides/zookeeper/scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Scaling ZooKeeper +menu: + docs_{{ .version }}: + identifier: zk-scaling + name: Scaling + parent: zk-zookeeper-guides + weight: 60 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/horizontal-scaling/_index.md b/docs/guides/zookeeper/scaling/horizontal-scaling/_index.md new file mode 100644 index 0000000000..edc4bdfa96 --- /dev/null +++ b/docs/guides/zookeeper/scaling/horizontal-scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: zk-horizontal-scaling + name: Horizontal Scaling + parent: zk-scaling + weight: 10 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/horizontal-scaling/horizontal-scaling.md b/docs/guides/zookeeper/scaling/horizontal-scaling/horizontal-scaling.md new file mode 100644 index 0000000000..1caa212db8 --- /dev/null +++ b/docs/guides/zookeeper/scaling/horizontal-scaling/horizontal-scaling.md @@ -0,0 +1,419 @@ +--- +title: Horizontal Scaling ZooKeeper +menu: + docs_{{ .version }}: + identifier: zk-horizontal-scaling-ops + name: Scale Horizontally + parent: zk-horizontal-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). 
+ +# Horizontal Scale ZooKeeper + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the ZooKeeper Cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + - [Horizontal Scaling Overview](/docs/guides/zookeeper/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/zookeeper](/docs/examples/zookeeper) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on zookeeper + +Here, we are going to deploy a `ZooKeeper` using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Deploy ZooKeeper + +In this section, we are going to deploy a ZooKeeper. We are going to deploy a `ZooKeeper` with version `3.8.3`. Then, in the next section we will scale the zookeeper using `ZooKeeperOpsRequest` CRD. Below is the YAML of the `ZooKeeper` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + storageClassName: "standard" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" +``` +Let's create the `ZooKeeper` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/scaling/zookeeper.yaml +zookeeper.kubedb.com/zk-quickstart created +``` + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 5m56s +``` + +Let's check the number of replicas this zookeeper has from the ZooKeeper object, number of pods the PetSet have, + +```bash +$ kubectl get zookeeper -n demo zk-quickstart -o json | jq '.spec.replicas' +3 + +$ kubectl get petset -n demo zk-quickstart -o json | jq '.spec.replicas' +3 +``` + +We can see from both command that the zookeeper has 3 replicas. + +We are now ready to apply the `ZooKeeperOpsRequest` CR to scale this zookeeper. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the zookeeper to meet the desired number of replicas after scaling. + +#### Create ZooKeeperOpsRequest + +In order to scale up the replicas of the zookeeper, we have to create a `ZooKeeperOpsRequest` CR with our desired replicas. 
Below is the YAML of the `ZooKeeperOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zookeeper-horizontal-scale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: zk-quickstart + horizontalScaling: + replicas: 5 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `zk-quickstart` zookeeper. +- `spec.type` specifies that we are performing `HorizontalScaling` on our zookeeper. +- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/scaling/horizontal-scaling/zk-hscale-up-ops.yaml +zookeeperopsrequest.ops.kubedb.com/zookeeper-horizontal-scale-up created +``` + +#### Verify replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `ZooKeeper` object and related `PetSet`. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ watch kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zookeeper-horizontal-scale-up HorizontalScaling Successful 2m49s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to scale the zookeeper. + +```bash +$ kubectl describe zookeeperopsrequest -n demo zookeeper-horizontal-scale-up +Name: zookeeper-horizontal-scale-up +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-25T13:37:43Z + Generation: 1 + Resource Version: 1198117 + UID: bfa6fb3f-5eb2-456c-8a3e-7a59097add0a +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Horizontal Scaling: + Replicas: 5 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-10-25T13:37:43Z + Message: ZooKeeper ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-10-25T13:38:03Z + Message: Successfully Scaled Up Node + Observed Generation: 1 + Reason: HorizontalScaleUp + Status: True + Type: HorizontalScaleUp + Last Transition Time: 2024-10-25T13:37:48Z + Message: patch petset; ConditionStatus:True; PodName:zk-quickstart-4 + Observed Generation: 1 + Status: True + Type: PatchPetset--zk-quickstart-4 + Last Transition Time: 2024-10-25T13:37:48Z + Message: zk-quickstart already has desired replicas + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2024-10-25T13:37:58Z + Message: is pod ready; ConditionStatus:True; PodName:zk-quickstart-4 + Observed Generation: 1 + Status: True + Type: IsPodReady--zk-quickstart-4 + Last Transition Time: 2024-10-25T13:37:58Z + Message: is node healthy; ConditionStatus:True; PodName:zk-quickstart-4 + Observed Generation: 1 + Status: True + Type: IsNodeHealthy--zk-quickstart-4 + Last Transition Time: 2024-10-25T13:38:03Z + Message: Successfully updated ZooKeeper + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-10-25T13:38:03Z + Message: Successfully completed the 
HorizontalScaling for FerretDB + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 47s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/horizontal-scale-up + Warning patch petset; ConditionStatus:True; PodName:zk-quickstart-4 42s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:zk-quickstart-4 + Warning is pod ready; ConditionStatus:False; PodName:zk-quickstart-4 37s KubeDB Ops-manager Operator is pod ready; ConditionStatus:False; PodName:zk-quickstart-4 + Warning is pod ready; ConditionStatus:True; PodName:zk-quickstart-4 32s KubeDB Ops-manager Operator is pod ready; ConditionStatus:True; PodName:zk-quickstart-4 + Warning is node healthy; ConditionStatus:True; PodName:zk-quickstart-4 32s KubeDB Ops-manager Operator is node healthy; ConditionStatus:True; PodName:zk-quickstart-4 + Normal HorizontalScaleUp 27s KubeDB Ops-manager Operator Successfully Scaled Up Node + Normal UpdateDatabase 27s KubeDB Ops-manager Operator Successfully updated ZooKeeper + Normal Starting 27s KubeDB Ops-manager Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 27s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: horizontal-scale-up +``` + +Now, we are going to verify the number of replicas this zookeeper has from the ZooKeeper object, and the number of pods the PetSet has, + +```bash +$ kubectl get zookeeper -n demo zk-quickstart -o json | jq '.spec.replicas' +5 + +$ kubectl get petset -n demo zk-quickstart -o json | jq '.spec.replicas' +5 +``` +From all the above outputs we can see that the replicas of the zookeeper is `5`. That means we have successfully scaled up the replicas of the ZooKeeper. + + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the zookeeper to meet the desired number of replicas after scaling. + +#### Create ZooKeeperOpsRequest + +In order to scale down the replicas of the zookeeper, we have to create a `ZooKeeperOpsRequest` CR with our desired replicas. Below is the YAML of the `ZooKeeperOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zookeeper-horizontal-scale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: zk-quickstart + horizontalScaling: + replicas: 3 + +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `zk-quickstart` zookeeper. +- `spec.type` specifies that we are performing `HorizontalScaling` on our zookeeper. +- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/scaling/horizontal-scaling/zk-hscale-down-ops.yaml +zookeeperopsrequest.ops.kubedb.com/zookeeper-horizontal-scale-down created +``` + +#### Verify replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `ZooKeeper` object and related `PetSet`. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`.
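+
+While the request is progressing, it can be handy to keep a second terminal open and watch the pods in the `demo` namespace; you should see the extra replicas being terminated one by one as the ensemble shrinks.
+
+```bash
+# Watch pod changes in the demo namespace while the scale-down runs.
+$ kubectl get pods -n demo -w
+```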
Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ watch kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zookeeper-horizontal-scale-down HorizontalScaling Successful 75s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to scale the zookeeper. + +```bash +$ kubectl describe zookeeperopsrequest -n demo zookeeper-horizontal-scale-down +Name: zookeeper-horizontal-scale-down +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-25T13:58:45Z + Generation: 1 + Resource Version: 1199568 + UID: 18b2adbb-9fbd-44fe-a265-e7eb4a292798 +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Horizontal Scaling: + Replicas: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-10-25T13:58:45Z + Message: ZooKeeper ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-10-25T14:00:23Z + Message: Successfully Scaled Down Node + Observed Generation: 1 + Reason: HorizontalScaleDown + Status: True + Type: HorizontalScaleDown + Last Transition Time: 2024-10-25T13:58:53Z + Message: patch petset; ConditionStatus:True; PodName:zk-quickstart-4 + Observed Generation: 1 + Status: True + Type: PatchPetset--zk-quickstart-4 + Last Transition Time: 2024-10-25T13:58:58Z + Message: get pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: GetPod + Last Transition Time: 2024-10-25T13:59:28Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-4 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-4 + Last Transition Time: 2024-10-25T13:59:28Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2024-10-25T13:59:38Z + Message: patch petset; ConditionStatus:True; PodName:zk-quickstart-3 + Observed Generation: 1 + Status: True + Type: PatchPetset--zk-quickstart-3 + Last Transition Time: 2024-10-25T13:59:38Z + Message: zk-quickstart already has desired replicas + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2024-10-25T14:00:13Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-3 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-3 + Last Transition Time: 2024-10-25T14:00:23Z + Message: Successfully updated ZooKeeper + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-10-25T14:00:23Z + Message: Successfully completed the HorizontalScaling for FerretDB + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m27s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/horizontal-scale-down + Normal Starting 3m27s KubeDB Ops-manager Operator Pausing ZooKeeper database: demo/zk-quickstart + Normal Successful 3m27s KubeDB Ops-manager Operator Successfully paused ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: horizontal-scale-down + Warning patch petset; ConditionStatus:True; PodName:zk-quickstart-4 3m19s KubeDB Ops-manager Operator patch 
petset; ConditionStatus:True; PodName:zk-quickstart-4 + Warning get pod; ConditionStatus:False 3m14s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-4 2m44s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-4 + Warning delete pvc; ConditionStatus:True 2m44s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pod; ConditionStatus:False 2m44s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-4 2m39s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-4 + Warning delete pvc; ConditionStatus:True 2m39s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-4 2m39s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-4 + Warning patch petset; ConditionStatus:True; PodName:zk-quickstart-3 2m34s KubeDB Ops-manager Operator patch petset; ConditionStatus:True; PodName:zk-quickstart-3 + Warning get pod; ConditionStatus:False 2m29s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-3 119s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-3 + Warning delete pvc; ConditionStatus:True 119s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pod; ConditionStatus:False 119s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-3 114s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-3 + Warning delete pvc; ConditionStatus:True 114s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-3 114s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-3 + Normal HorizontalScaleDown 109s KubeDB Ops-manager Operator Successfully Scaled Down Node + Normal UpdateDatabase 109s KubeDB Ops-manager Operator Successfully updated ZooKeeper + Normal Starting 109s KubeDB Ops-manager Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 109s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: horizontal-scale-down +``` + +Now, we are going to verify the number of replicas this zookeeper has from the ZooKeeper object, and the number of pods the PetSet has, + +```bash +$ kubectl get zookeeper -n demo zk-quickstart -o json | jq '.spec.replicas' +3 + +$ kubectl get petset -n demo zk-quickstart -o json | jq '.spec.replicas' +3 +``` +From all the above outputs we can see that the replicas of the zookeeper is `3`. That means we have successfully scaled down the replicas of the ZooKeeper.
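+
+As an extra sanity check, the ensemble membership can be inspected from inside any pod with ZooKeeper's built-in `conf` four-letter-word command. After the scale down, the dynamic configuration should list only three `server.N` participants; the `grep` filter below is just for readability.
+
+```bash
+$ kubectl exec -it -n demo zk-quickstart-0 -- bash
+zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ echo conf | nc localhost 2181 | grep 'server\.'
+```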
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zk -n demo +kubectl delete zookeeperopsrequest -n demo zookeeper-horizontal-scale-down +``` \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/horizontal-scaling/overview.md b/docs/guides/zookeeper/scaling/horizontal-scaling/overview.md new file mode 100644 index 0000000000..4f12b86e16 --- /dev/null +++ b/docs/guides/zookeeper/scaling/horizontal-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: ZooKeeper Horizontal Scaling Overview +menu: + docs_{{ .version }}: + identifier: zk-horizontal-scaling-overview + name: Overview + parent: zk-horizontal-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ZooKeeper Horizontal Scaling + +This guide will give an overview on how KubeDB Ops-manager operator scales up or down `ZooKeeper` cluster. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + +## How Horizontal Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator scales up or down `ZooKeeper` database components. Open the image in a new tab to see the enlarged version. + +
+  Horizontal scaling process of ZooKeeper +
Fig: Horizontal scaling process of ZooKeeper
+
+ +The Horizontal scaling process consists of the following steps: + +1. At first, a user creates a `ZooKeeper` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `ZooKeeper` CR. + +3. When the operator finds a `ZooKeeper` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to scale the `ZooKeeper` cluster, the user creates a `ZooKeeperOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ZooKeeperOpsRequest` CR. + +6. When it finds a `ZooKeeperOpsRequest` CR, it halts the `ZooKeeper` object which is referred from the `ZooKeeperOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `ZooKeeper` object during the horizontal scaling process. + +7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `ZooKeeperOpsRequest` CR. + +8. After the successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `ZooKeeper` object to reflect the updated state. + +9. After the successful scaling of the `ZooKeeper` replicas, the `KubeDB` Ops-manager operator resumes the `ZooKeeper` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the [next](/docs/guides/zookeeper/scaling/horizontal-scaling/horizontal-scaling.md) docs, we are going to show a step by step guide on horizontal scaling of ZooKeeper database using `ZooKeeperOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/vertical-scaling/_index.md b/docs/guides/zookeeper/scaling/vertical-scaling/_index.md new file mode 100644 index 0000000000..4b0fd3973a --- /dev/null +++ b/docs/guides/zookeeper/scaling/vertical-scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Vertical Scaling +menu: + docs_{{ .version }}: + identifier: zk-vertical-scaling + name: Vertical Scaling + parent: zk-scaling + weight: 20 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/vertical-scaling/overview.md b/docs/guides/zookeeper/scaling/vertical-scaling/overview.md new file mode 100644 index 0000000000..67d462d379 --- /dev/null +++ b/docs/guides/zookeeper/scaling/vertical-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: ZooKeeper Vertical Scaling Overview +menu: + docs_{{ .version }}: + identifier: zk-vertical-scaling-overview + name: Overview + parent: zk-vertical-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ZooKeeper Vertical Scaling + +This guide will give an overview on how KubeDB Ops-manager operator updates the resources(for example CPU and Memory etc.) of the `ZooKeeper` database. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + +## How Vertical Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator updates the resources of the `ZooKeeper` database. Open the image in a new tab to see the enlarged version. + +
+  Vertical scaling process of ZooKeeper +
Fig: Vertical scaling process of ZooKeeper
+
+ +The vertical scaling process consists of the following steps: + +1. At first, a user creates a `ZooKeeper` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `ZooKeeper` CR. + +3. When the operator finds a `ZooKeeper` CR, it creates required number of `Petsets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to update the resources(for example `CPU`, `Memory` etc.) of the `ZooKeeper` database the user creates a `ZooKeeperOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ZooKeeperOpsRequest` CR. + +6. When it finds a `ZooKeeperOpsRequest` CR, it halts the `ZooKeeper` object which is referred from the `ZooKeeperOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `ZooKeeper` object during the vertical scaling process. + +7. Then the `KubeDB` Ops-manager operator will update resources of the Petset Pods to reach desired state. + +8. After the successful update of the resources of the Petset's replica, the `KubeDB` Ops-manager operator updates the `ZooKeeper` object to reflect the updated state. + +9. After the successful update of the `ZooKeeper` resources, the `KubeDB` Ops-manager operator resumes the `ZooKeeper` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the [next](/docs/guides/zookeeper/scaling/vertical-scaling/vertical-scaling.md) docs, we are going to show a step by step guide on updating resources of ZooKeeper database using `ZooKeeperOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/zookeeper/scaling/vertical-scaling/vertical-scaling.md b/docs/guides/zookeeper/scaling/vertical-scaling/vertical-scaling.md new file mode 100644 index 0000000000..508beb5e07 --- /dev/null +++ b/docs/guides/zookeeper/scaling/vertical-scaling/vertical-scaling.md @@ -0,0 +1,293 @@ +--- +title: Vertical Scaling ZooKeeper +menu: + docs_{{ .version }}: + identifier: zk-vertical-scaling-ops + name: Scale Vertically + parent: zk-vertical-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale ZooKeeper Standalone + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a ZooKeeper standalone database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + - [Vertical Scaling Overview](/docs/guides/zookeeper/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/zookeeper](/docs/examples/zookeeper) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. 
+ +## Apply Vertical Scaling on Standalone + +Here, we are going to deploy a `ZooKeeper` standalone using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare ZooKeeper Standalone Database + +Now, we are going to deploy a `ZooKeeper` standalone database with version `3.8.3`. + +### Deploy ZooKeeper standalone + +In this section, we are going to deploy a ZooKeeper standalone database. Then, in the next section we will update the resources of the database using `ZooKeeperOpsRequest` CRD. Below is the YAML of the `ZooKeeper` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + storageClassName: "standard" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" + +``` + +Let's create the `ZooKeeper` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/scaling/zookeeper.yaml +zookeeper.kubedb.com/zk-quickstart created +``` + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 5m56s +``` + +Let's check the Pod containers resources, + +```bash +$ kubectl get pod -n demo zk-quickstart-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` + +You can see the Pod has default resources which is assigned by the Kubedb operator. + +We are now ready to apply the `ZooKeeperOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the standalone database to meet the desired resources after scaling. + +#### Create ZooKeeperOpsRequest + +In order to update the resources of the database, we have to create a `ZooKeeperOpsRequest` CR with our desired resources. Below is the YAML of the `ZooKeeperOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: vscale + namespace: demo +spec: + databaseRef: + name: zk-quickstart + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 1 + memory: 2Gi + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `vscale` database. +- `spec.type` specifies that we are performing `VerticalScaling` on our database. +- `spec.VerticalScaling.node` specifies the desired resources after scaling. +- Have a look [here](/docs/guides/zookeeper/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/scaling/vertical-scaling/zk-vscale.yaml +zookeeperopsrequest.ops.kubedb.com/vscale created +``` + +#### Verify ZooKeeper Standalone resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `ZooKeeper` object and related `Petsets` and `Pods`. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. 
Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ kubectl get zookeeperopsrequest -n demo +Every 2.0s: kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +vscale VerticalScaling Successful 108s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to scale the database. + +```bash +$ kubectl describe zookeeperopsrequest -n demo vscale +Name: vscale +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-24T11:21:28Z + Generation: 1 + Resource Version: 1151711 + UID: 53ba9aef-cfa6-40f1-a5a8-6055bafb0c7b +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Timeout: 5m + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Cpu: 1 + Memory: 2Gi + Requests: + Cpu: 1 + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2024-10-24T11:21:28Z + Message: ZooKeeper ops-request has started to vertically scaling the ZooKeeper nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-24T11:21:31Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-24T11:21:31Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: False + Type: RestartPods + Last Transition Time: 2024-10-24T11:21:36Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-0 + Last Transition Time: 2024-10-24T11:21:36Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-0 + Last Transition Time: 2024-10-24T11:21:41Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-10-24T11:22:16Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-1 + Last Transition Time: 2024-10-24T11:22:16Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-1 + Last Transition Time: 2024-10-24T11:22:56Z + Message: get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: GetPod--zk-quickstart-2 + Last Transition Time: 2024-10-24T11:22:56Z + Message: evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + Observed Generation: 1 + Status: True + Type: EvictPod--zk-quickstart-2 + Observed Generation: 1 + Phase: Progressing +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m24s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/vscale + Normal Starting 3m24s KubeDB Ops-manager Operator Pausing ZooKeeper database: demo/zk-quickstart + Normal Successful 3m24s KubeDB Ops-manager Operator Successfully paused ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: vscale + Normal UpdatePetSets 3m21s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-0 3m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True; 
PodName:zk-quickstart-0 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-0 3m16s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-0 + Warning running pod; ConditionStatus:False 3m11s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-1 2m36s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-1 2m36s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-1 + Warning get pod; ConditionStatus:True; PodName:zk-quickstart-2 116s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:zk-quickstart-2 + Warning evict pod; ConditionStatus:True; PodName:zk-quickstart-2 116s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:zk-quickstart-2 + +``` + +Now, we are going to verify from the Pod yaml whether the resources of the standalone database has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo zk-quickstart-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2Gi" + }, + "requests": { + "cpu": "1", + "memory": "2Gi" + } +} +``` + +The above output verifies that we have successfully scaled up the resources of the ZooKeeper standalone database. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zk -n demo zk-quickstart +kubectl delete zookeeperopsrequest -n demo vscale +``` \ No newline at end of file diff --git a/docs/guides/zookeeper/tls/_index.md b/docs/guides/zookeeper/tls/_index.md new file mode 100644 index 0000000000..d1b1b06277 --- /dev/null +++ b/docs/guides/zookeeper/tls/_index.md @@ -0,0 +1,10 @@ +--- +title: Run ZooKeeper with TLS +menu: + docs_{{ .version }}: + identifier: zk-tls + name: TLS/SSL Encryption + parent: zk-zookeeper-guides + weight: 45 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/zookeeper/tls/configure-ssl.md b/docs/guides/zookeeper/tls/configure-ssl.md new file mode 100644 index 0000000000..ca6fa9696b --- /dev/null +++ b/docs/guides/zookeeper/tls/configure-ssl.md @@ -0,0 +1,268 @@ +--- +title: ZooKeeper TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: zk-tls-configure + name: ZooKeeper_SSL + parent: zk-tls + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run ZooKeeper Ensemble with TLS/SSL + +KubeDB supports providing TLS/SSL encryption for ZooKeeper Ensemble. This tutorial will show you how to use KubeDB to run a ZooKeeper Ensemble with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+ + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/zookeeper](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/zookeeper) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in ZooKeeper. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [zookeeper Concept Guide](/docs/guides/zookeeper/concepts/zookeeper.md), + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in ZooKeeper. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=zookeeper/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls zookeeper-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: zookeeper-ca-issuer + namespace: demo +spec: + ca: + secretName: zookeeper-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/tls/zookeeper-issuer.yaml +issuer.cert-manager.io/zookeeper-ca-issuer created +``` + +## TLS/SSL encryption in ZooKeeper Ensemble + +Below is the YAML for ZooKeeper with TLS enabled: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-tls + namespace: demo +spec: + version: "3.8.3" + enableSSL: true + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: zookeeper-ca-issuer + adminServerPort: 8080 + replicas: 5 + storage: + resources: + requests: + storage: "1Gi" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" + +``` + +Here, +- `spec.enableSSL` is set to `true` to enable TLS/SSL encryption. +- `spec.tls.issuerRef` refers to the `Issuer` that we have created in the previous step. +- +### Deploy ZOoKeeper Ensemble with TLS/SSL + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/tls/zookeeper-tls.yaml +zookeeper.kubedb.com/zk-tls created +``` + +Now, wait until `zookeeper-tls created` has status `Ready`. 
i.e, + +```bash +$ watch kubectl get zookeeper -n demo +NAME TYPE VERSION STATUS AGE +zk-tls kubedb.com/v1alpha2 3.8.3 Ready 60s +``` + +### Verify TLS/SSL in ZooKeeper Ensemble + +```bash +$ kubectl describe secret -n demo zk-quickstart-client-cert +Name: zk-quickstart-client-cert +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=zk-quickstart + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=zookeepers.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.zk-quickstart-pods.demo.svc.cluster.local,localhost,zk-quickstart,zk-quickstart-pods,zk-quickstart-pods.demo.svc,zk-quickstart-pods.demo... + cert-manager.io/certificate-name: zk-quickstart-client-cert + cert-manager.io/common-name: zk-quickstart-pods.demo.svc + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: zookeeper-ca-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +ca.crt: 1159 bytes +keystore.jks: 3258 bytes +tls-combined.pem: 3198 bytes +tls.crt: 1493 bytes +tls.key: 1704 bytes +truststore.jks: 873 bytes +``` + +Now, Let's exec into a ZooKeeper pod and verify the configuration that the TLS is enabled. + +```bash +$ kubectl exec -it -n demo zk-quickstart-0 -- bash +Defaulted container "zookeeper" out of: zookeeper, zookeeper-init (init) +zookeeper@zk-quickstart-0:/apache-zookeeper-3.8.3-bin$ cd ../var/private/ssl +zookeeper@zk-quickstart-0:/var/private/ssl$ openssl s_client -connect localhost:2182 -CAfile ca.crt -cert tls.crt -key tls.key +CONNECTED(00000003) +depth=1 CN = zookeeper, O = kubedb +verify return:1 +depth=0 CN = zk-quickstart.demo.svc +verify return:1 +--- +Certificate chain + 0 s:CN = zk-quickstart.demo.svc + i:CN = zookeeper, O = kubedb + a:PKEY: rsaEncryption, 2048 (bit); sigalg: RSA-SHA256 + v:NotBefore: Nov 4 05:46:21 2024 GMT; NotAfter: Feb 2 05:46:21 2025 GMT +--- +Server certificate +-----BEGIN CERTIFICATE----- +MIIEJTCCAw2gAwIBAgIQaWLGhg/TgVF8oXGcsLQkKjANBgkqhkiG9w0BAQsFADAl +MRIwEAYDVQQDDAl6b29rZWVwZXIxDzANBgNVBAoMBmt1YmVkYjAeFw0yNDExMDQw +NTQ2MjFaFw0yNTAyMDIwNTQ2MjFaMCExHzAdBgNVBAMTFnprLXF1aWNrc3RhcnQu +ZGVtby5zdmMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCeeiLZeNa7 +wHOUwD76fmp45Ae9qlpHCW/lGz+lGO48FBDUBbG2Tm2BZVW2297HOzb/Lax6Molb +9qCDsV7ITCUYXLBGz0pCGqGYS/icZupShhKAvD33Gn8kH/QeANwFonpxBAtr36vi +WxwcRD+dfVAu7OCATwSakZh3zdbRPQXLiAVqj8qn4zNSYL5bzUXQ5dHFzvgwZve5 +FR3QYLvVjUEu2tFjCKM+/HTzQ/IMUAjcU0lU4qnWqnhgcGp8ZE3hDyL9OOOsjrWx +CGNhB0Orf6Efztkqq4FMZ//w3DUQgnRglGKl1rGK015//W0MGSPlT4uve6Z7zaRU +aUqa7Y8P5wZxAgMBAAGjggFTMIIBTzAOBgNVHQ8BAf8EBAMCAqQwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FC7Wrn4SOKhsT4TQFEMtSao72H5TMB8GA1UdIwQYMBaAFDe7/VhWOllB39U/xOht +MxmZu9wQMIHMBgNVHREEgcQwgcGCKyouemstcXVpY2tzdGFydC1wb2RzLmRlbW8u +c3ZjLmNsdXN0ZXIubG9jYWyCCWxvY2FsaG9zdIINemstcXVpY2tzdGFydIISemst +cXVpY2tzdGFydC1wb2Rzght6ay1xdWlja3N0YXJ0LXBvZHMuZGVtby5zdmOCKXpr +LXF1aWNrc3RhcnQtcG9kcy5kZW1vLnN2Yy5jbHVzdGVyLmxvY2FsghZ6ay1xdWlj +a3N0YXJ0LmRlbW8uc3ZjhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCGGxgGzdjF +Vo9VALc6ddZD50M7bfh5L5z2KfSY4ZH7kuokM52LGzJYwREV3UpVAhjBqn0XEf9p +JX8ePo0Z9zjtWIIZg4ctjlCvKDy+HpKlqh2RJejnl+NoLPV628QJDiEksLzdVl4v +z36AwdGeUhADpvoGQiXUT6LgrD++Uv0akpDEzWOB2LUKsvCRKnxyBNyBqpsW8/Pu +DeC/RUGXT/JFtZtDBGp8d/FOIpJ0t/ZjrI9Hyu5DLFB08oTYmEVE3Lv2owZZV/o8 +6YqlpTu2efKEzMFZudUWpnGUrb69sZeDR9hwxGcAdKobTB8SZOBU61nsRn95BH7O +S4dKhcrbzP70 +-----END CERTIFICATE----- 
+subject=CN = zk-quickstart.demo.svc +issuer=CN = zookeeper, O = kubedb +--- +Acceptable client certificate CA names +CN = zookeeper, O = kubedb +Client Certificate Types: ECDSA sign, RSA sign, DSA sign +Requested Signature Algorithms: ECDSA+SHA256:ECDSA+SHA384:ECDSA+SHA512:RSA-PSS+SHA256:RSA-PSS+SHA384:RSA-PSS+SHA512:RSA-PSS+SHA256:RSA-PSS+SHA384:RSA-PSS+SHA512:RSA+SHA256:RSA+SHA384:RSA+SHA512:DSA+SHA256:ECDSA+SHA224:RSA+SHA224:DSA+SHA224:ECDSA+SHA1:RSA+SHA1:DSA+SHA1 +Shared Requested Signature Algorithms: ECDSA+SHA256:ECDSA+SHA384:ECDSA+SHA512:RSA-PSS+SHA256:RSA-PSS+SHA384:RSA-PSS+SHA512:RSA-PSS+SHA256:RSA-PSS+SHA384:RSA-PSS+SHA512:RSA+SHA256:RSA+SHA384:RSA+SHA512:DSA+SHA256:ECDSA+SHA224:RSA+SHA224:DSA+SHA224 +Peer signing digest: SHA256 +Peer signature type: RSA-PSS +Server Temp Key: X25519, 253 bits +--- +SSL handshake has read 1611 bytes and written 2553 bytes +Verification: OK +--- +New, TLSv1.2, Cipher is ECDHE-RSA-AES128-GCM-SHA256 +Server public key is 2048 bit +Secure Renegotiation IS supported +Compression: NONE +Expansion: NONE +No ALPN negotiated +SSL-Session: + Protocol : TLSv1.2 + Cipher : ECDHE-RSA-AES128-GCM-SHA256 + Session-ID: 057DF7D5B8BCE6DA3EAE6101136E644057BE67AF0A4931DC8FD15848D4E74D38 + Session-ID-ctx: + Master-Key: 807690ACC8782745D1C8AB6E4CF42FCAE7B13CAAC75A27FF4538FEA136DB9E6A332FDDB18703367593EBAD77629919C3 + PSK identity: None + PSK identity hint: None + SRP username: None + Start Time: 1730703067 + Timeout : 7200 (sec) + Verify return code: 0 (ok) + Extended master secret: yes +--- +``` + +From the above output, we can see that we are able to connect to the ZooKeeper Ensemble using the TLS configuration. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zookeeper -n demo zk-tls +kubectl delete issuer -n demo zookeeper-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [ZooKeeper object](/docs/guides/zookeeper/concepts/zookeeper.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). \ No newline at end of file diff --git a/docs/guides/zookeeper/tls/overview.md b/docs/guides/zookeeper/tls/overview.md new file mode 100644 index 0000000000..3da9624413 --- /dev/null +++ b/docs/guides/zookeeper/tls/overview.md @@ -0,0 +1,70 @@ +--- +title: ZooKeeper TLS/SSL Encryption Overview +menu: + docs_{{ .version }}: + identifier: zk-tls-overview + name: Overview + parent: zk-tls + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ZooKeeper TLS/SSL Encryption + +**Prerequisite :** To configure TLS/SSL in `ZooKeeper`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster following steps [here](https://cert-manager.io/docs/installation/kubernetes/). + +To issue a certificate, the following crd of `cert-manager` is used: + +- `Issuer/ClusterIssuer`: Issuers, and ClusterIssuers represent certificate authorities (CAs) that are able to generate signed certificates by honoring certificate signing requests. All cert-manager certificates require a referenced issuer that is in a ready condition to attempt to honor the request. You can learn more details [here](https://cert-manager.io/docs/concepts/issuer/). + +- `Certificate`: `cert-manager` has the concept of Certificates that define a desired x509 certificate which will be renewed and kept up to date. 
You can learn more details [here](https://cert-manager.io/docs/concepts/certificate/). + +**ZooKeeper CRD Specification :** + +KubeDB uses following crd fields to enable SSL/TLS encryption in `ZooKeeper`. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificates` + +Read about the fields in details from [zookeeper concept](/docs/guides/zookeeper/concepts/zookeeper.md), + +When, `enableSSL` is set to `true`, the users must specify the `tls.issuerRef` field. `KubeDB` uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets using `Issuer/ClusterIssuers` specification. These certificates secrets including `ca.crt`, `tls.crt` and `tls.key` etc. are used to configure `zookeeper` server and clients. + +## How TLS/SSL configures in ZooKeeper + +The following figure shows how `KubeDB` enterprise used to configure TLS/SSL in ZooKeeper. Open the image in a new tab to see the enlarged version. + +
+Deploy ZooKeeper with TLS/SSL +
Fig: Deploy ZooKeeper with TLS/SSL
+
+ +Deploying ZooKeeper with TLS/SSL configuration process consists of the following steps: + +1. At first, a user creates a `Issuer/ClusterIssuer` CR. + +2. Then the user creates a `ZooKeeper` CR which refers to the `Issuer/ClusterIssuer` CR that the user created in the previous step. + +3. `KubeDB` Provisioner operator watches for the `ZooKeeper` CR. + +4. When it finds one, it creates `Secret`, `Service`, etc. for the `ZooKeeper` cluster. + +5. `KubeDB` Ops-manager operator watches for `ZooKeeper`(5c), `Issuer/ClusterIssuer`(5b), `Secret` and `Service`(5a). + +6. When it finds all the resources(`ZooKeeper`, `Issuer/ClusterIssuer`, `Secret`, `Service`), it creates `Certificates` by using `tls.issuerRef` and `tls.certificates` field specification from `ZooKeeper` CR. + +7. `cert-manager` watches for certificates. + +8. When it finds one, it creates certificate secrets `tls-secrets`(server, client, exporter secrets etc.) that holds the actual certificate signed by the CA. + +9. `KubeDB` Provisioner operator watches for the Certificate secrets `tls-secrets`. + +10. When it finds all the tls-secret, it creates the related `PetSets` so that ZooKeeper database can be configured with TLS/SSL. + +In the [next](/docs/guides/zookeeper/tls/configure-ssl.md) doc, we are going to show a step-by-step guide on how to configure a `ZooKeeper` cluster with TLS/SSL. \ No newline at end of file diff --git a/docs/guides/zookeeper/volume-expansion/_index.md b/docs/guides/zookeeper/volume-expansion/_index.md new file mode 100644 index 0000000000..b152726462 --- /dev/null +++ b/docs/guides/zookeeper/volume-expansion/_index.md @@ -0,0 +1,10 @@ +--- +title: Volume Expansion +menu: + docs_{{ .version }}: + identifier: zk-volume-expansion + name: Volume Expansion + parent: zk-zookeeper-guides + weight: 70 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/zookeeper/volume-expansion/overview.md b/docs/guides/zookeeper/volume-expansion/overview.md new file mode 100644 index 0000000000..371284bdd2 --- /dev/null +++ b/docs/guides/zookeeper/volume-expansion/overview.md @@ -0,0 +1,56 @@ +--- +title: ZooKeeper Volume Expansion Overview +menu: + docs_{{ .version }}: + identifier: zk-volume-expansion-overview + name: Overview + parent: zk-volume-expansion + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ZooKeeper Volume Expansion + +This guide will give an overview on how KubeDB Ops-manager operator expand the volume of `ZooKeeper` cluster nodes. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + +## How Volume Expansion Process Works + +The following diagram shows how KubeDB Ops-manager operator expand the volumes of `ZooKeeper` database components. Open the image in a new tab to see the enlarged version. + +
+  Volume Expansion process of ZooKeeper +
Fig: Volume Expansion process of ZooKeeper
+
+ +The Volume Expansion process consists of the following steps: + +1. At first, a user creates a `ZooKeeper` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `ZooKeeper` CR. + +3. When the operator finds a `ZooKeeper` CR, it creates required number of `Petsets` and related necessary stuff like secrets, services, etc. + +4. Each petset creates a Persistent Volume according to the Volume Claim Template provided in the PetSet configuration. This Persistent Volume will be expanded by the `KubeDB` Ops-manager operator. + +5. Then, in order to expand the volume the `ZooKeeper` database the user creates a `ZooKeeperOpsRequest` CR with desired information. + +6. `KubeDB` Ops-manager operator watches the `ZooKeeperOpsRequest` CR. + +7. When it finds a `ZooKeeperOpsRequest` CR, it halts the `ZooKeeper` object which is referred from the `ZooKeeperOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `ZooKeeper` object during the volume expansion process. + +8. Then the `KubeDB` Ops-manager operator will expand the persistent volume to reach the expected size defined in the `ZooKeeperOpsRequest` CR. + +9. After the successful Volume Expansion of the related Petset Pods, the `KubeDB` Ops-manager operator updates the new volume size in the `ZooKeeper` object to reflect the updated state. + +10. After the successful Volume Expansion of the `ZooKeeper` components, the `KubeDB` Ops-manager operator resumes the `ZooKeeper` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the [next](/docs/guides/zookeeper/volume-expansion/volume-expansion.md) docs, we are going to show a step-by-step guide on Volume Expansion of various ZooKeeper database components using `ZooKeeperOpsRequest` CRD. diff --git a/docs/guides/zookeeper/volume-expansion/volume-expansion.md b/docs/guides/zookeeper/volume-expansion/volume-expansion.md new file mode 100644 index 0000000000..ae2bba9d6f --- /dev/null +++ b/docs/guides/zookeeper/volume-expansion/volume-expansion.md @@ -0,0 +1,386 @@ +--- +title: ZooKeeper Volume Expansion +menu: + docs_{{ .version }}: + identifier: zk-volume-expansion-describe + name: Expand Storage Volume + parent: zk-volume-expansion + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Volume Expansion of ZooKeeper Ensemble + +This guide will show you how to use `KubeDB` Ops-manager operator to expand the volume of a ZooKeeper database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- You must have a `StorageClass` that supports volume expansion. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [ZooKeeper](/docs/guides/zookeeper/concepts/zookeeper.md) + - [ZooKeeperOpsRequest](/docs/guides/zookeeper/concepts/opsrequest.md) + - [Volume Expansion Overview](/docs/guides/zookeeper/volume-expansion/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/ZooKeeper](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/zookeeper) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Expand Volume of ZooKeeper Ensemble + +Here, we are going to deploy a `ZooKeeper` standalone using a supported version by `KubeDB` operator. Then we are going to apply `ZooKeeperOpsRequest` to expand its volume. + +### Prepare ZooKeeper Ensemble + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +longhorn (default) driver.longhorn.io Delete Immediate true 93s +longhorn-static driver.longhorn.io Delete Immediate true 90s +``` + +We can see from the output the `standard` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. + +Now, we are going to deploy a `ZooKeeper` standalone database with version `3.8.3`. + +#### Deploy ZooKeeper Ensemble + +In this section, we are going to deploy a ZooKeeper standalone database with 1GB volume. Then, in the next section we will expand its volume to 2GB using `ZooKeeperOpsRequest` CRD. Below is the YAML of the `ZooKeeper` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zk-quickstart + namespace: demo +spec: + version: "3.8.3" + adminServerPort: 8080 + replicas: 3 + storage: + resources: + requests: + storage: "1Gi" + storageClassName: "longhorn" + accessModes: + - ReadWriteOnce + deletionPolicy: "WipeOut" +``` + +Let's create the `ZooKeeper` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/volume-expansion/zookeeper.yaml +zookeeper.kubedb.com/zk-quickstart created +``` + +Now, wait until `zk-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get zk -n demo +NAME VERSION STATUS AGE +zk-quickstart 3.8.3 Ready 5m56s +``` + +Let's check volume size from PetSet, and from the persistent volume, + +```bash +$ kubectl get petset -n demo zk-quickstart -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-3551d7c0-0df6-4f94-b1e0-21834319ecab 1Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-0 longhorn 92s +pvc-b5882e9e-3c61-4609-b5ba-0eb9f32edbbc 1Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-2 longhorn 58s +pvc-dccf2b12-d695-4792-8e4b-de4342e7fed4 1Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-1 longhorn 74s +``` + +You can see the PetSet has 1GB storage, and the capacity of the persistent volume is also 1GB. + +We are now ready to apply the `ZooKeeperOpsRequest` CR to expand the volume of this database. + +### Volume Expansion + +Here, we are going to expand the volume of the standalone database. + +#### Create ZooKeeperOpsRequest + +In order to expand the volume of the database, we have to create a `ZooKeeperOpsRequest` CR with our desired volume size. 
Below is the YAML of the `ZooKeeperOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ZooKeeperOpsRequest +metadata: + name: zk-offline-volume-expansion + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: zk-quickstart + volumeExpansion: + mode: "Offline" + node: 2Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `zk-quickstart` database. +- `spec.type` specifies that we are performing `VolumeExpansion` on our database. +- `spec.volumeExpansion.node` specifies the desired volume size. +- `spec.volumeExpansion.mode` specifies the desired volume expansion mode(`Online` or `Offline`). + +During `Online` VolumeExpansion KubeDB expands volume without pausing database object, it directly updates the underlying PVC. And for `Offline` volume expansion, the database is paused. The Pods are deleted and PVC is updated. Then the database Pods are recreated with updated PVC. + +Let's create the `ZooKeeperOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/zookeeper/volume-expansion/zkops-volume-exp-offline.yaml +zookeeperopsrequest.ops.kubedb.com/zk-offline-volume-expansion created +``` + +#### Verify ZooKeeper Standalone volume expanded successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `ZooKeeper` object and related `Petsets` and `Persistent Volume`. + +Let's wait for `ZooKeeperOpsRequest` to be `Successful`. Run the following command to watch `ZooKeeperOpsRequest` CR, + +```bash +$ kubectl get zookeeperopsrequest -n demo +NAME TYPE STATUS AGE +zk-offline-volume-expansion VolumeExpansion Successful 75s +``` + +We can see from the above output that the `ZooKeeperOpsRequest` has succeeded. If we describe the `ZooKeeperOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. + +```bash +$ kubectl describe zookeeperopsrequest -n demo zk-offline-volume-expansion +Name: zk-offline-volume-expansion +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ZooKeeperOpsRequest +Metadata: + Creation Timestamp: 2024-10-28T11:12:02Z + Generation: 1 + Resource Version: 1321277 + UID: 13851249-f148-4745-a565-0aaea704f830 +Spec: + Apply: IfReady + Database Ref: + Name: zk-quickstart + Type: VolumeExpansion + Volume Expansion: + Mode: Offline + Node: 2Gi +Status: + Conditions: + Last Transition Time: 2024-10-28T11:12:02Z + Message: ZooKeeper ops-request has started to expand volume of zookeeper nodes. 
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-10-28T11:12:20Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-10-28T11:12:10Z + Message: get petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetset + Last Transition Time: 2024-10-28T11:12:10Z + Message: delete petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePetset + Last Transition Time: 2024-10-28T11:15:55Z + Message: successfully updated node PVC sizes + Observed Generation: 1 + Reason: UpdateNodePVCs + Status: True + Type: UpdateNodePVCs + Last Transition Time: 2024-10-28T11:15:05Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-10-28T11:12:25Z + Message: patch ops request; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsRequest + Last Transition Time: 2024-10-28T11:12:25Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2024-10-28T11:13:00Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-10-28T11:13:00Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2024-10-28T11:15:45Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-10-28T11:13:15Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-10-28T11:16:00Z + Message: successfully reconciled the ZooKeeper resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-28T11:16:05Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-10-28T11:16:05Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-10-28T11:16:05Z + Message: Successfully completed volumeExpansion for ZooKeeper + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 5m19s KubeDB Ops-manager Operator Start processing for ZooKeeperOpsRequest: demo/zk-offline-volume-expansion + Normal Starting 5m19s KubeDB Ops-manager Operator Pausing ZooKeeper database: demo/zk-quickstart + Normal Successful 5m19s KubeDB Ops-manager Operator Successfully paused ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zk-offline-volume-expansion + Warning get petset; ConditionStatus:True 5m11s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning delete petset; ConditionStatus:True 5m11s KubeDB Ops-manager Operator delete petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 5m6s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Normal OrphanPetSetPods 5m1s KubeDB Ops-manager Operator successfully deleted the petSets with orphan propagation policy + Warning get pod; ConditionStatus:True 4m56s KubeDB Ops-manager Operator get 
pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 4m56s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 4m56s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 4m51s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 4m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m21s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 4m21s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 4m21s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 4m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m11s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m6s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 4m6s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 4m6s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 4m6s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 3m56s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 3m56s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 3m51s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 3m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m21s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 3m21s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 3m21s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 3m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m11s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m6s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m1s KubeDB Ops-manager 
Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 3m1s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 3m1s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 3m1s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m51s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 2m51s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 2m51s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 2m46s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 2m16s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 2m16s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m11s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m6s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m1s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 116s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 116s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 111s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 111s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 106s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 106s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 101s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 101s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 96s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 96s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 96s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 96s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 96s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 91s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal UpdateNodePVCs 86s KubeDB Ops-manager Operator successfully updated 
node PVC sizes + Normal UpdatePetSets 81s KubeDB Ops-manager Operator successfully reconciled the ZooKeeper resources + Warning get pet set; ConditionStatus:True 76s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 76s KubeDB Ops-manager Operator PetSet is recreated + Normal Starting 76s KubeDB Ops-manager Operator Resuming ZooKeeper database: demo/zk-quickstart + Normal Successful 76s KubeDB Ops-manager Operator Successfully resumed ZooKeeper database: demo/zk-quickstart for ZooKeeperOpsRequest: zk-offline-volume-expansion +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volume` whether the volume of the standalone database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo zk-quickstart -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"2Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-1b112414-6162-4e75-99c9-3e62cb4efb4a 2Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-1 longhorn 16m +pvc-3159b881-1954-4008-8594-599bee9fd11e 2Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-0 longhorn 17m +pvc-43ba80bd-9029-413e-b89c-1f373fd0cd3d 2Gi RWO Delete Bound demo/zk-quickstart-data-zk-quickstart-2 longhorn 16m +``` + +The above output verifies that we have successfully expanded the volume of the ZooKeeper standalone database. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete zk -n demo zk-quickstart +kubectl delete zookeeperopsrequest -n demo zk-offline-volume-expansion +``` diff --git a/docs/images/day-2-operation/zookeeper/zk-horizontal-scaling.svg b/docs/images/day-2-operation/zookeeper/zk-horizontal-scaling.svg new file mode 100644 index 0000000000..636eac7ce0 --- /dev/null +++ b/docs/images/day-2-operation/zookeeper/zk-horizontal-scaling.svg @@ -0,0 +1,3 @@ + + +
1.Create ZooKeeper
1.Create ZooKeeper
2.Watch
2.Watch
3.Create
3.Create
4.Initiate Horizontal Scaling
4.Initiate Hori...
6.Pause
6.Pause
7.Scaling Pod
7.Scaling Pod
8.Update Replica
8.Update Replica
9.Resume
9.Resume
Horizontal Scaling stage
Horizontal Scaling stage
User
User
            Provisioner        
              
              
StatefulSet's
Pod
Stateful...
StatefulSet's
Pod
Stateful...
5.Watch
5.Watch
            Ops Manager            
ZooKeeper
 OpsReques
t
ZooKeeper...
ZooKeeper
ZooKeeper
refers to
refers to
Updated
ZooKeeper
Update...
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/images/day-2-operation/zookeeper/zk-reconfigure-tls.svg b/docs/images/day-2-operation/zookeeper/zk-reconfigure-tls.svg new file mode 100644 index 0000000000..b4d3ab4479 --- /dev/null +++ b/docs/images/day-2-operation/zookeeper/zk-reconfigure-tls.svg @@ -0,0 +1,3 @@ + + +
1.Create ZooKeeper
1.Create ZooKeeper
2.Watch
2.Watch
3.Create
3.Create
4.Initiate Re-configuring TLS
4.Initiate Re-c...
6.Pause
6.Pause
8.Restart Pods
8.Restart Pods
7.Update TLS configuration
7.Update TLS configurati...
9.Resume
9.Resume
Reconfiguring TLS stage
Reconfiguring TLS stage
User
User
            Provisioner        
              
              
StatefulSet's
Pod
Stateful...
5.Watch
5.Watch
            Ops Manager            
ZooKeeper
 OpsReques
t
ZooKeeper...
ZooKeeper
ZooKeeper
refers to
refers to
Updated
ZooKeeper
Update...
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/images/day-2-operation/zookeeper/zk-reconfigure.svg b/docs/images/day-2-operation/zookeeper/zk-reconfigure.svg new file mode 100644 index 0000000000..0201874843 --- /dev/null +++ b/docs/images/day-2-operation/zookeeper/zk-reconfigure.svg @@ -0,0 +1,3 @@ + + +
[Figure: ZooKeeper reconfiguration flow — 1. User creates ZooKeeper; 2-3. Provisioner watches it and creates the StatefulSet's pods; 4. User initiates a ZooKeeperOpsRequest to reconfigure the database; 5. Ops Manager watches the request; 6. pauses the database; 7. updates the configuration; 8. restarts the pods; 9. resumes the updated ZooKeeper.]
\ No newline at end of file diff --git a/docs/images/day-2-operation/zookeeper/zk-version-update.svg b/docs/images/day-2-operation/zookeeper/zk-version-update.svg index 97259b8047..6b9c1ad93b 100644 --- a/docs/images/day-2-operation/zookeeper/zk-version-update.svg +++ b/docs/images/day-2-operation/zookeeper/zk-version-update.svg @@ -1,4 +1,4 @@ -
[Removed figure text: previous ZooKeeper version update diagram — same 9-step updating flow, but labeled with the old Community Operator / Enterprise Operator names and lowercase "zookeeper".]
\ No newline at end of file +
[Figure: ZooKeeper version update flow — 1. User creates ZooKeeper; 2-3. Provisioner watches it and creates the StatefulSet; 4. User initiates a ZooKeeperOpsRequest to update the version; 5. Ops Manager watches the request; 6. pauses the database; 7. updates and performs checks; 8. updates the image in the updated/new StatefulSet; 9. resumes the updated ZooKeeper. Labels now read Provisioner / Ops Manager.]
\ No newline at end of file diff --git a/docs/images/day-2-operation/zookeeper/zk-vertical-scaling.svg b/docs/images/day-2-operation/zookeeper/zk-vertical-scaling.svg new file mode 100644 index 0000000000..eb0e06d1f5 --- /dev/null +++ b/docs/images/day-2-operation/zookeeper/zk-vertical-scaling.svg @@ -0,0 +1,4 @@ + + +
[Figure: ZooKeeper vertical scaling flow — 1. User creates ZooKeeper; 2-3. Provisioner watches it and creates the StatefulSet's pods; 4. User initiates a ZooKeeperOpsRequest for vertical scaling; 5. Ops Manager watches the request; 6. pauses the database; 7. scales the resources; 8. updates the pod resources; 9. resumes the updated ZooKeeper.]
\ No newline at end of file diff --git a/docs/images/day-2-operation/zookeeper/zk-volume-expansion.svg b/docs/images/day-2-operation/zookeeper/zk-volume-expansion.svg new file mode 100644 index 0000000000..a9700e964c --- /dev/null +++ b/docs/images/day-2-operation/zookeeper/zk-volume-expansion.svg @@ -0,0 +1,3 @@ + + +
[Figure: ZooKeeper volume expansion flow — 1. User creates ZooKeeper; 2-4. Provisioner watches it and creates the StatefulSet and Persistent Volumes; 5. User initiates a ZooKeeperOpsRequest for volume expansion; 6. Ops Manager watches the request; 7. pauses the database; 8. expands the volume; 9. updates the volume to the expanded Persistent Volume; 10. resumes the updated ZooKeeper.]
\ No newline at end of file diff --git a/docs/images/zookeeper/zk-builtin-prom-target.png b/docs/images/zookeeper/zk-builtin-prom-target.png new file mode 100644 index 0000000000..15937cd1f3 Binary files /dev/null and b/docs/images/zookeeper/zk-builtin-prom-target.png differ diff --git a/docs/images/zookeeper/zk-tls.svg b/docs/images/zookeeper/zk-tls.svg new file mode 100644 index 0000000000..b0a3cc5be0 --- /dev/null +++ b/docs/images/zookeeper/zk-tls.svg @@ -0,0 +1,4 @@ + + + +
[Figure: ZooKeeper TLS provisioning flow — actors: User, Provisioner, Ops Manager, cert-manager; objects: Issuer/ClusterIssuer, ZooKeeper, Certificates, tls-secret, secret, service, StatefulSet. The user creates the Issuer/ClusterIssuer and the ZooKeeper object, the operators watch them and create Certificates, cert-manager issues the tls-secret, and the secret, service, and StatefulSet created by the operator refer to it (steps 1-10, 5a-5c).]
\ No newline at end of file diff --git a/docs/images/zookeeper/zookeeper-prometheus.png b/docs/images/zookeeper/zookeeper-prometheus.png new file mode 100644 index 0000000000..ad73f32458 Binary files /dev/null and b/docs/images/zookeeper/zookeeper-prometheus.png differ diff --git a/docs/setup/install/kubedb.md b/docs/setup/install/kubedb.md index 7b9da4f8d8..a87d6e252a 100644 --- a/docs/setup/install/kubedb.md +++ b/docs/setup/install/kubedb.md @@ -100,7 +100,6 @@ $ watch kubectl get pods --all-namespaces -l "app.kubernetes.io/instance=kubedb" NAME READY STATUS RESTARTS AGE kubedb-kubedb-autoscaler-b5dd47dc5-bxnrq 1/1 Running 0 48s -kubedb-kubedb-dashboard-99db95dc4-j78w2 1/1 Running 0 48s kubedb-kubedb-ops-manager-6f766b86c6-h9m66 1/1 Running 0 48s kubedb-kubedb-provisioner-6fd44d5784-d8v9c 1/1 Running 0 48s kubedb-kubedb-webhook-server-6cf469bdf4-72wvz 1/1 Running 0 48s