From f3b7aeca3cbb243073ebcf61c51874f5a64481b7 Mon Sep 17 00:00:00 2001
From: yamatcha
Date: Mon, 22 Jan 2024 02:46:06 +0000
Subject: [PATCH] Support backup and restore with ObjectBucketName set in environment variables

Signed-off-by: yamatcha
---
 docs/usage.md                      |  29 ++++++
 e2e/backup_with_env_test.go        | 151 +++++++++++++++++++++++++++++
 e2e/testdata/backup_with_env.yaml  |  73 ++++++++++++++
 e2e/testdata/restore_with_env.yaml |  49 ++++++++++
 4 files changed, 302 insertions(+)
 create mode 100644 e2e/backup_with_env_test.go
 create mode 100644 e2e/testdata/backup_with_env.yaml
 create mode 100644 e2e/testdata/restore_with_env.yaml

diff --git a/docs/usage.md b/docs/usage.md
index aff163f75..3dda2a4f4 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -457,6 +457,35 @@ spec:
 ...
 ```
 
+>**Note:** If you want to specify the ObjectBucket name in a ConfigMap or Secret, you can use `envFrom` and specify the environment variable name in `jobConfig.bucketConfig.bucketName` as follows.
+>This behavior is covered by the e2e tests.
+
+```yaml
+apiVersion: moco.cybozu.com/v1beta2
+kind: BackupPolicy
+metadata:
+  namespace: backup
+  name: daily
+spec:
+  jobConfig:
+    bucketConfig:
+      bucketName: "$(BUCKET_NAME)"
+      endpointURL: http://minio.default.svc:9000
+      usePathStyle: true
+    envFrom:
+    - configMapRef:
+        name: bucket-name
+...
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: backup
+  name: bucket-name
+data:
+  BUCKET_NAME: moco
+```
+
 MOCO creates a [CronJob][] for each MySQLCluster that has `spec.backupPolicyName`.
 The CronJob's name is `moco-backup-` + the name of MySQLCluster.
 
diff --git a/e2e/backup_with_env_test.go b/e2e/backup_with_env_test.go
new file mode 100644
index 000000000..fed78b200
--- /dev/null
+++ b/e2e/backup_with_env_test.go
@@ -0,0 +1,151 @@
+package e2e
+
+import (
+	"bytes"
+	_ "embed"
+	"encoding/json"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	mocov1beta2 "github.com/cybozu-go/moco/api/v1beta2"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+//go:embed testdata/backup_with_env.yaml
+var backupWithEnvYAML string
+
+//go:embed testdata/restore_with_env.yaml
+var restoreWithEnvYAML string
+
+var _ = Context("backup with ObjectBucketName set in environment variables", func() {
+	if doUpgrade {
+		return
+	}
+
+	var restorePoint time.Time
+
+	It("should create a bucket", func() {
+		kubectlSafe([]byte(makeBucketYAML), "apply", "-f", "-")
+		Eventually(func(g Gomega) {
+			out, err := kubectl(nil, "get", "jobs", "make-bucket", "-o", "json")
+			g.Expect(err).NotTo(HaveOccurred())
+			job := &batchv1.Job{}
+			err = json.Unmarshal(out, job)
+			g.Expect(err).NotTo(HaveOccurred())
+			condComplete, err := getJobCondition(job, batchv1.JobComplete)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "make-bucket has not been finished")
+		}).Should(Succeed())
+	})
+
+	It("should construct a source cluster", func() {
+		kubectlSafe(fillTemplate(backupWithEnvYAML), "apply", "-f", "-")
+		Eventually(func(g Gomega) {
+			cluster, err := getCluster("backup", "source")
+			g.Expect(err).NotTo(HaveOccurred())
+			condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue))
+		}).Should(Succeed())
+
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
+			"-e", "CREATE DATABASE test")
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
+			"-D", "test", "-e", "CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT, data VARCHAR(32) NOT NULL, PRIMARY KEY (id), KEY key1 (data), KEY key2 (data, id)) ENGINE=InnoDB")
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
+			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('aaa')")
+	})
+
+	It("should take a full dump", func() {
+		kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-1")
+		Eventually(func(g Gomega) {
+			out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-1", "-o", "json")
+			g.Expect(err).NotTo(HaveOccurred())
+			job := &batchv1.Job{}
+			err = json.Unmarshal(out, job)
+			g.Expect(err).NotTo(HaveOccurred())
+			condComplete, err := getJobCondition(job, batchv1.JobComplete)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-1 has not been finished")
+		}).Should(Succeed())
+	})
+
+	It("should take an incremental backup", func() {
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
+			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('bbb')")
+		time.Sleep(1100 * time.Millisecond)
+		restorePoint = time.Now().UTC()
+		time.Sleep(1100 * time.Millisecond)
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-admin", "source", "--",
+			"-D", "test", "--init_command=SET autocommit=1", "-e", "FLUSH LOCAL BINARY LOGS")
+		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
+			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('ccc')")
+		time.Sleep(100 * time.Millisecond)
+
+		kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-2")
+		Eventually(func(g Gomega) {
+			out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-2", "-o", "json")
+			g.Expect(err).NotTo(HaveOccurred())
+			job := &batchv1.Job{}
+			err = json.Unmarshal(out, job)
+			g.Expect(err).NotTo(HaveOccurred())
+			condComplete, err := getJobCondition(job, batchv1.JobComplete)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-2 has not been finished")
+		}).Should(Succeed())
+
+		cluster, err := getCluster("backup", "source")
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cluster.Status.Backup.BinlogSize).NotTo(Equal(int64(0)))
+	})
+
+	It("should destroy the source then restore the backup data", func() {
+		kubectlSafe(nil, "-n", "backup", "delete", "mysqlclusters", "source")
+
+		tmpl, err := template.New("").Parse(restoreWithEnvYAML)
+		Expect(err).NotTo(HaveOccurred())
+		buf := new(bytes.Buffer)
+		err = tmpl.Execute(buf, struct {
+			MySQLVersion string
+			RestorePoint string
+		}{
+			mysqlVersion,
+			restorePoint.Format(time.RFC3339),
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		kubectlSafe(buf.Bytes(), "apply", "-f", "-")
+		Eventually(func(g Gomega) {
+			cluster, err := getCluster("backup", "target")
+			g.Expect(err).NotTo(HaveOccurred())
+			condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue), "target is not healthy")
+		}).Should(Succeed())
+
+		out := kubectlSafe(nil, "moco", "-n", "backup", "mysql", "target", "--",
+			"-N", "-D", "test", "-e", "SELECT COUNT(*) FROM t")
+		count, err := strconv.Atoi(strings.TrimSpace(string(out)))
+		Expect(err).NotTo(HaveOccurred())
+		Expect(count).To(Equal(2))
+	})
+
+	It("should delete clusters", func() {
+		kubectlSafe(nil, "delete", "-n", "backup", "mysqlclusters", "--all")
+
+		Eventually(func(g Gomega) {
+			out, err := kubectl(nil, "get", "-n", "backup", "pod", "-o", "json")
+			g.Expect(err).NotTo(HaveOccurred())
+			pods := &corev1.PodList{}
+			err = json.Unmarshal(out, pods)
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(len(pods.Items)).To(BeNumerically("==", 0), "wait until all Pods are deleted")
+		}).Should(Succeed())
+	})
+})
diff --git a/e2e/testdata/backup_with_env.yaml b/e2e/testdata/backup_with_env.yaml
new file mode 100644
index 000000000..8c52bbe2b
--- /dev/null
+++ b/e2e/testdata/backup_with_env.yaml
@@ -0,0 +1,73 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: backup
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: backup
+  name: mycnf
+data:
+  innodb_log_file_size: "10M"
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: backup
+  name: backup-owner
+---
+apiVersion: moco.cybozu.com/v1beta2
+kind: BackupPolicy
+metadata:
+  namespace: backup
+  name: daily
+spec:
+  schedule: "@daily"
+  jobConfig:
+    serviceAccountName: backup-owner
+    env:
+    - name: AWS_ACCESS_KEY_ID
+      value: minioadmin
+    - name: AWS_SECRET_ACCESS_KEY
+      value: minioadmin
+    bucketConfig:
+      bucketName: "$(BUCKET_NAME)"
+      endpointURL: http://minio.default.svc:9000
+      usePathStyle: true
+    envFrom:
+    - configMapRef:
+        name: bucket-name
+    workVolume:
+      emptyDir: {}
+---
+apiVersion: moco.cybozu.com/v1beta2
+kind: MySQLCluster
+metadata:
+  namespace: backup
+  name: source
+spec:
+  mysqlConfigMapName: mycnf
+  replicas: 3
+  backupPolicyName: daily
+  podTemplate:
+    spec:
+      containers:
+      - name: mysqld
+        image: ghcr.io/cybozu-go/moco/mysql:{{ . }}
+  volumeClaimTemplates:
+  - metadata:
+      name: mysql-data
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 1Gi
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: backup
+  name: bucket-name
+data:
+  BUCKET_NAME: moco
diff --git a/e2e/testdata/restore_with_env.yaml b/e2e/testdata/restore_with_env.yaml
new file mode 100644
index 000000000..13d593e1a
--- /dev/null
+++ b/e2e/testdata/restore_with_env.yaml
@@ -0,0 +1,49 @@
+apiVersion: moco.cybozu.com/v1beta2
+kind: MySQLCluster
+metadata:
+  namespace: backup
+  name: target
+spec:
+  mysqlConfigMapName: mycnf
+  replicas: 1
+  restore:
+    sourceName: source
+    sourceNamespace: backup
+    restorePoint: "{{ .RestorePoint }}"
+    jobConfig:
+      serviceAccountName: backup-owner
+      env:
+      - name: AWS_ACCESS_KEY_ID
+        value: minioadmin
+      - name: AWS_SECRET_ACCESS_KEY
+        value: minioadmin
+      bucketConfig:
+        bucketName: "$(BUCKET_NAME)"
+        endpointURL: http://minio.default.svc:9000
+        usePathStyle: true
+      workVolume:
+        emptyDir: {}
+      envFrom:
+      - configMapRef:
+          name: bucket-name
+  podTemplate:
+    spec:
+      containers:
+      - name: mysqld
+        image: ghcr.io/cybozu-go/moco/mysql:{{ .MySQLVersion }}
+  volumeClaimTemplates:
+  - metadata:
+      name: mysql-data
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 1Gi
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: backup
+  name: bucket-name
+data:
+  BUCKET_NAME: moco
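
For reference, the `$(BUCKET_NAME)` value in `bucketConfig.bucketName` above uses the Kubernetes `$(VAR_NAME)` reference syntax, with the variable supplied by the `bucket-name` ConfigMap through `envFrom`. The Go sketch below only illustrates how such a reference resolves against a set of environment variables; it is not MOCO's implementation (MOCO presumably relies on Kubernetes' own substitution for the backup container), and `expandRefs` is a hypothetical helper written for this example.

```go
package main

import (
	"fmt"
	"regexp"
)

// refPattern matches Kubernetes-style $(VAR_NAME) references such as the
// "$(BUCKET_NAME)" placeholder used in bucketConfig.bucketName.
var refPattern = regexp.MustCompile(`\$\(([A-Za-z_][A-Za-z0-9_]*)\)`)

// expandRefs replaces each $(VAR_NAME) reference in s with the matching
// value from env, leaving unknown references untouched.
func expandRefs(s string, env map[string]string) string {
	return refPattern.ReplaceAllStringFunc(s, func(ref string) string {
		name := refPattern.FindStringSubmatch(ref)[1]
		if v, ok := env[name]; ok {
			return v
		}
		return ref
	})
}

func main() {
	// Environment as provided to the backup container by the bucket-name
	// ConfigMap through envFrom.
	env := map[string]string{"BUCKET_NAME": "moco"}
	fmt.Println(expandRefs("$(BUCKET_NAME)", env)) // Output: moco
}
```

With the manifests above, the environment provides `BUCKET_NAME=moco`, so the backup and restore Jobs end up operating on the `moco` bucket.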