Skip to content

Commit

Permalink
Merge pull request #637 from cybozu-go/support-backup-with-env
Browse files Browse the repository at this point in the history
Support backup and restore with ObjectBucketName set in environment variables
  • Loading branch information
yamatcha authored Jan 23, 2024
2 parents a7352fa + f3b7aec commit c789997
Show file tree
Hide file tree
Showing 4 changed files with 302 additions and 0 deletions.
29 changes: 29 additions & 0 deletions docs/usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -457,6 +457,35 @@ spec:
...
```

>**Note:** If you want to take the ObjectBucket name from a ConfigMap or Secret, load it into the backup Job's environment with `jobConfig.envFrom` and reference the environment variable in `jobConfig.bucketConfig.bucketName` using the `$(VAR_NAME)` syntax, as in the following example.
>This behavior is covered by the e2e tests.

```yaml
apiVersion: moco.cybozu.com/v1beta2
kind: BackupPolicy
metadata:
namespace: backup
name: daily
spec:
jobConfig:
bucketConfig:
bucketName: "$(BUCKET_NAME)"
endpointURL: http://minio.default.svc:9000
usePathStyle: true
envFrom:
- configMapRef:
name: bucket-name
...
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: backup
name: bucket-name
data:
BUCKET_NAME: moco
```

MOCO creates a [CronJob][] for each MySQLCluster that has `spec.backupPolicyName`.

The CronJob's name is `moco-backup-` + the name of MySQLCluster.
Expand Down
151 changes: 151 additions & 0 deletions e2e/backup_with_env_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
package e2e

import (
"bytes"
_ "embed"
"encoding/json"
"strconv"
"strings"
"text/template"
"time"

mocov1beta2 "github.com/cybozu-go/moco/api/v1beta2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

//go:embed testdata/backup_with_env.yaml
var backupWithEnvYAML string

//go:embed testdata/restore_with_env.yaml
var restoreWithEnvYAML string

// This Context exercises MOCO backup and restore when the object bucket name
// is not written literally in the BackupPolicy but is referenced as
// "$(BUCKET_NAME)" and resolved from a ConfigMap through jobConfig.envFrom
// (see e2e/testdata/backup_with_env.yaml and restore_with_env.yaml).
var _ = Context("backup with ObjectBucketName is set in environment variables", func() {
	// This scenario is skipped during upgrade test runs.
	if doUpgrade {
		return
	}

	// A point in time recorded between the 'bbb' and 'ccc' INSERTs; restoring
	// to this point should yield exactly two rows in test.t.
	var restorePoint time.Time

	It("should create a bucket", func() {
		// Create the MinIO bucket via the make-bucket Job and wait for it to complete.
		kubectlSafe([]byte(makeBucketYAML), "apply", "-f", "-")
		Eventually(func(g Gomega) {
			out, err := kubectl(nil, "get", "jobs", "make-bucket", "-o", "json")
			g.Expect(err).NotTo(HaveOccurred())
			job := &batchv1.Job{}
			err = json.Unmarshal(out, job)
			g.Expect(err).NotTo(HaveOccurred())
			condComplete, err := getJobCondition(job, batchv1.JobComplete)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "make-bucket has not been finished")
		}).Should(Succeed())
	})

	It("should construct a source cluster", func() {
		// Apply the source-cluster manifests and wait until the cluster is healthy.
		kubectlSafe(fillTemplate(backupWithEnvYAML), "apply", "-f", "-")
		Eventually(func(g Gomega) {
			cluster, err := getCluster("backup", "source")
			g.Expect(err).NotTo(HaveOccurred())
			condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue))
		}).Should(Succeed())

		// Seed one row ('aaa') so the full dump has data to capture.
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
			"-e", "CREATE DATABASE test")
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
			"-D", "test", "-e", "CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT, data VARCHAR(32) NOT NULL, PRIMARY KEY (id), KEY key1 (data), KEY key2 (data, id)) ENGINE=InnoDB")
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('aaa')")
	})

	It("should take a full dump", func() {
		// Trigger a one-off backup Job from the CronJob and wait for completion.
		kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-1")
		Eventually(func(g Gomega) {
			out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-1", "-o", "json")
			g.Expect(err).NotTo(HaveOccurred())
			job := &batchv1.Job{}
			err = json.Unmarshal(out, job)
			g.Expect(err).NotTo(HaveOccurred())
			condComplete, err := getJobCondition(job, batchv1.JobComplete)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-1 has not been finished")
		}).Should(Succeed())
	})

	It("should take an incremental backup", func() {
		// Insert the second row, then record restorePoint with a >1s margin on
		// both sides so it falls unambiguously between the 'bbb' and 'ccc'
		// inserts.
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('bbb')")
		time.Sleep(1100 * time.Millisecond)
		restorePoint = time.Now().UTC()
		time.Sleep(1100 * time.Millisecond)
		// Rotate the binary log, then add a third row that must NOT appear
		// after a point-in-time restore to restorePoint.
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-admin", "source", "--",
			"-D", "test", "--init_command=SET autocommit=1", "-e", "FLUSH LOCAL BINARY LOGS")
		kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('ccc')")
		time.Sleep(100 * time.Millisecond)

		kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-2")
		Eventually(func(g Gomega) {
			out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-2", "-o", "json")
			g.Expect(err).NotTo(HaveOccurred())
			job := &batchv1.Job{}
			err = json.Unmarshal(out, job)
			g.Expect(err).NotTo(HaveOccurred())
			condComplete, err := getJobCondition(job, batchv1.JobComplete)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-2 has not been finished")
		}).Should(Succeed())

		// The second backup must have uploaded binlogs (the incremental part).
		cluster, err := getCluster("backup", "source")
		Expect(err).NotTo(HaveOccurred())
		Expect(cluster.Status.Backup.BinlogSize).NotTo(Equal(int64(0)))
	})

	It("should destroy the source then restore the backup data", func() {
		kubectlSafe(nil, "-n", "backup", "delete", "mysqlclusters", "source")

		// Render the restore manifest with the MySQL version and the recorded
		// point-in-time (RFC3339).
		tmpl, err := template.New("").Parse(restoreWithEnvYAML)
		Expect(err).NotTo(HaveOccurred())
		buf := new(bytes.Buffer)
		err = tmpl.Execute(buf, struct {
			MySQLVersion string
			RestorePoint string
		}{
			mysqlVersion,
			restorePoint.Format(time.RFC3339),
		})
		Expect(err).NotTo(HaveOccurred())

		kubectlSafe(buf.Bytes(), "apply", "-f", "-")
		Eventually(func(g Gomega) {
			cluster, err := getCluster("backup", "target")
			g.Expect(err).NotTo(HaveOccurred())
			condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue), "target is not healthy")
		}).Should(Succeed())

		// Restoring to restorePoint must reproduce exactly the two rows
		// ('aaa' and 'bbb') that existed at that moment; 'ccc' must be absent.
		out := kubectlSafe(nil, "moco", "-n", "backup", "mysql", "target", "--",
			"-N", "-D", "test", "-e", "SELECT COUNT(*) FROM t")
		count, err := strconv.Atoi(strings.TrimSpace(string(out)))
		Expect(err).NotTo(HaveOccurred())
		Expect(count).To(Equal(2))
	})

	It("should delete clusters", func() {
		kubectlSafe(nil, "delete", "-n", "backup", "mysqlclusters", "--all")

		Eventually(func(g Gomega) {
			out, err := kubectl(nil, "get", "-n", "backup", "pod", "-o", "json")
			g.Expect(err).NotTo(HaveOccurred())
			pods := &corev1.PodList{}
			err = json.Unmarshal(out, pods)
			g.Expect(err).NotTo(HaveOccurred())
			// BUG FIX: the original asserted len(pods.Items) > 0, which
			// succeeds while Pods still exist and therefore never waits for
			// deletion. "Waiting" means succeeding only once the list is empty.
			g.Expect(pods.Items).To(BeEmpty(), "wait until all Pods are deleted")
		}).Should(Succeed())
	})
})
73 changes: 73 additions & 0 deletions e2e/testdata/backup_with_env.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# Test fixture for the "backup with ObjectBucketName in an environment
# variable" e2e scenario. The BackupPolicy refers to the bucket as
# "$(BUCKET_NAME)", which the backup Job resolves from the bucket-name
# ConfigMap loaded via jobConfig.envFrom.
apiVersion: v1
kind: Namespace
metadata:
  name: backup
---
# mysqld configuration for the source cluster.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: backup
  name: mycnf
data:
  innodb_log_file_size: "10M"
---
# ServiceAccount the backup CronJob runs under.
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: backup
  name: backup-owner
---
apiVersion: moco.cybozu.com/v1beta2
kind: BackupPolicy
metadata:
  namespace: backup
  name: daily
spec:
  schedule: "@daily"
  jobConfig:
    serviceAccountName: backup-owner
    env:
      # Static credentials for the test MinIO instance.
      - name: AWS_ACCESS_KEY_ID
        value: minioadmin
      - name: AWS_SECRET_ACCESS_KEY
        value: minioadmin
    bucketConfig:
      # Not a literal bucket name: this is a reference expanded from the
      # environment populated by envFrom below.
      bucketName: "$(BUCKET_NAME)"
      endpointURL: http://minio.default.svc:9000
      usePathStyle: true
    envFrom:
      - configMapRef:
          name: bucket-name
    workVolume:
      emptyDir: {}
---
apiVersion: moco.cybozu.com/v1beta2
kind: MySQLCluster
metadata:
  namespace: backup
  name: source
spec:
  mysqlConfigMapName: mycnf
  replicas: 3
  backupPolicyName: daily
  podTemplate:
    spec:
      containers:
        - name: mysqld
          # The MySQL version is filled in by the e2e test harness.
          image: ghcr.io/cybozu-go/moco/mysql:{{ . }}
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
---
# Holds the actual bucket name that the backup Job reads as BUCKET_NAME.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: backup
  name: bucket-name
data:
  BUCKET_NAME: moco
49 changes: 49 additions & 0 deletions e2e/testdata/restore_with_env.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Test fixture for restoring the "source" backup into a new "target"
# cluster. Rendered as a Go template: .MySQLVersion and .RestorePoint are
# supplied by the e2e test. As in the backup manifest, the bucket name is
# resolved from the bucket-name ConfigMap through jobConfig.envFrom.
apiVersion: moco.cybozu.com/v1beta2
kind: MySQLCluster
metadata:
  namespace: backup
  name: target
spec:
  mysqlConfigMapName: mycnf
  replicas: 1
  restore:
    sourceName: source
    sourceNamespace: backup
    # Point-in-time to restore to (RFC3339), recorded by the test between
    # the second and third INSERT.
    restorePoint: "{{ .RestorePoint }}"
    jobConfig:
      serviceAccountName: backup-owner
      env:
        # Static credentials for the test MinIO instance.
        - name: AWS_ACCESS_KEY_ID
          value: minioadmin
        - name: AWS_SECRET_ACCESS_KEY
          value: minioadmin
      bucketConfig:
        # Reference expanded from the environment populated by envFrom below.
        bucketName: "$(BUCKET_NAME)"
        endpointURL: http://minio.default.svc:9000
        usePathStyle: true
      workVolume:
        emptyDir: {}
      envFrom:
        - configMapRef:
            name: bucket-name
  podTemplate:
    spec:
      containers:
        - name: mysqld
          image: ghcr.io/cybozu-go/moco/mysql:{{ .MySQLVersion }}
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
---
# Holds the bucket name that the restore Job reads as BUCKET_NAME.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: backup
  name: bucket-name
data:
  BUCKET_NAME: moco

0 comments on commit c789997

Please sign in to comment.