diff --git a/deploy/crds/planetscale.com_vitessclusters.yaml b/deploy/crds/planetscale.com_vitessclusters.yaml
index 818304f1..8a24b70a 100644
--- a/deploy/crds/planetscale.com_vitessclusters.yaml
+++ b/deploy/crds/planetscale.com_vitessclusters.yaml
@@ -35,6 +35,7 @@ spec:
                 enum:
                 - builtin
                 - xtrabackup
+                - mysqlshell
                 type: string
               locations:
                 items:
diff --git a/pkg/apis/planetscale/v2/vitesscluster_types.go b/pkg/apis/planetscale/v2/vitesscluster_types.go
index 000088e9..c19ebc5a 100644
--- a/pkg/apis/planetscale/v2/vitesscluster_types.go
+++ b/pkg/apis/planetscale/v2/vitesscluster_types.go
@@ -316,7 +316,7 @@ type ClusterBackupSpec struct {
 	// from one tablet in each shard. Otherwise, new tablets trying to restore
 	// will find that the latest backup was created with the wrong engine.
 	// Default: builtin
-	// +kubebuilder:validation:Enum=builtin;xtrabackup
+	// +kubebuilder:validation:Enum=builtin;xtrabackup;mysqlshell
 	Engine VitessBackupEngine `json:"engine,omitempty"`
 	// Subcontroller specifies any parameters needed for launching the VitessBackupStorage subcontroller pod.
 	Subcontroller *VitessBackupSubcontrollerSpec `json:"subcontroller,omitempty"`
@@ -337,6 +337,8 @@ const (
 	VitessBackupEngineBuiltIn VitessBackupEngine = "builtin"
 	// VitessBackupEngineXtraBackup uses Percona XtraBackup for backups.
 	VitessBackupEngineXtraBackup VitessBackupEngine = "xtrabackup"
+	// VitessBackupEngineMySQLShell uses MySQL Shell for backups.
+	VitessBackupEngineMySQLShell VitessBackupEngine = "mysqlshell"
 )
 
 // LockserverSpec specifies either a deployed or external lockserver,
diff --git a/pkg/operator/vttablet/backup.go b/pkg/operator/vttablet/backup.go
index 5c34ea18..e5d3ab3e 100644
--- a/pkg/operator/vttablet/backup.go
+++ b/pkg/operator/vttablet/backup.go
@@ -19,14 +19,14 @@ package vttablet
 import (
 	"fmt"
 
+	corev1 "k8s.io/api/core/v1"
 	planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
 	"planetscale.dev/vitess-operator/pkg/operator/lazy"
 	"planetscale.dev/vitess-operator/pkg/operator/vitess"
 	"planetscale.dev/vitess-operator/pkg/operator/vitessbackup"
-	corev1 "k8s.io/api/core/v1"
 )
 
-func xtrabackupFlags(spec *Spec, backupThreads, restoreThreads int) vitess.Flags {
+func xtrabackupFlags(backupThreads, restoreThreads int) vitess.Flags {
 	flags := vitess.Flags{
 		"xtrabackup_user":        xtrabackupUser,
 		"xtrabackup_stream_mode": xtrabackupStreamMode,
@@ -39,6 +39,15 @@ func xtrabackupFlags(spec *Spec, backupThreads, restoreThreads int) vitess.Flags
 	return flags
 }
 
+func mysqlshellFlags(backupLocation string) vitess.Flags {
+	flags := vitess.Flags{
+		"mysql-shell-backup-location": backupLocation,
+		"mysql-shell-flags":           mysqlshellExtraFlags,
+	}
+
+	return flags
+}
+
 func init() {
 	vttabletFlags.Add(func(s lazy.Spec) vitess.Flags {
 		spec := s.(*Spec)
@@ -51,7 +60,8 @@ func init() {
 			"wait_for_backup_interval":     waitForBackupInterval,
 			"backup_engine_implementation": string(spec.BackupEngine),
 		}
-		if spec.BackupEngine == planetscalev2.VitessBackupEngineXtraBackup {
+		switch spec.BackupEngine {
+		case planetscalev2.VitessBackupEngineXtraBackup:
 			// When vttablets take backups, we let them keep serving, so we
 			// limit to single-threaded to reduce the impact.
 			backupThreads := 1
@@ -67,7 +77,10 @@ func init() {
 			if restoreThreads < 1 {
 				restoreThreads = 1
 			}
-			flags.Merge(xtrabackupFlags(spec, backupThreads, restoreThreads))
+			flags.Merge(xtrabackupFlags(backupThreads, restoreThreads))
+		case planetscalev2.VitessBackupEngineMySQLShell:
+			svm := vitessbackup.StorageVolumeMounts(spec.BackupLocation)
+			flags.Merge(mysqlshellFlags(svm[0].MountPath))
 		}
 		clusterName := spec.Labels[planetscalev2.ClusterLabel]
 		storageLocationFlags := vitessbackup.StorageFlags(spec.BackupLocation, clusterName)
@@ -93,7 +106,7 @@ func init() {
 		if threads < 1 {
 			threads = 1
 		}
-		flags.Merge(xtrabackupFlags(spec, threads, threads))
+		flags.Merge(xtrabackupFlags(threads, threads))
 	}
 	clusterName := spec.Labels[planetscalev2.ClusterLabel]
 	storageLocationFlags := vitessbackup.StorageFlags(spec.BackupLocation, clusterName)
diff --git a/pkg/operator/vttablet/constants.go b/pkg/operator/vttablet/constants.go
index 388bf580..89824ba9 100644
--- a/pkg/operator/vttablet/constants.go
+++ b/pkg/operator/vttablet/constants.go
@@ -109,6 +109,9 @@ const (
 	xtrabackupStripeCount = 8
 	xtrabackupUser        = "vt_dba"
 
+	mysqlshellUser       = "vt_dba"
+	mysqlshellExtraFlags = "--defaults-file=/dev/null --no-password --js -u " + mysqlshellUser + " -S " + mysqlSocketPath
+
 	// mysqlctlWaitTime is how long mysqlctld will wait for mysqld to start up
 	// before assuming it's stuck and trying to restart it. We set this fairly
 	// high because it can take a while to do crash recovery and it's rarely
diff --git a/test/endtoend/operator/101_initial_cluster.yaml b/test/endtoend/operator/101_initial_cluster.yaml
index b5f4c422..0bfd2e4f 100644
--- a/test/endtoend/operator/101_initial_cluster.yaml
+++ b/test/endtoend/operator/101_initial_cluster.yaml
@@ -157,6 +157,7 @@ stringData:
     CREATE USER 'vt_dba'@'localhost';
     GRANT ALL ON *.* TO 'vt_dba'@'localhost';
     GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+    GRANT PROXY ON ''@'' TO 'vt_dba'@'localhost' WITH GRANT OPTION;
 
     # User for app traffic, with global read-write access.
     CREATE USER 'vt_app'@'localhost';
diff --git a/test/endtoend/operator/101_initial_cluster_backup.yaml b/test/endtoend/operator/101_initial_cluster_backup.yaml
index 63fadac2..66bc4d8d 100644
--- a/test/endtoend/operator/101_initial_cluster_backup.yaml
+++ b/test/endtoend/operator/101_initial_cluster_backup.yaml
@@ -165,6 +165,7 @@ stringData:
     CREATE USER 'vt_dba'@'localhost';
     GRANT ALL ON *.* TO 'vt_dba'@'localhost';
    GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+    GRANT PROXY ON ''@'' TO 'vt_dba'@'localhost' WITH GRANT OPTION;
 
     # User for app traffic, with global read-write access.
     CREATE USER 'vt_app'@'localhost';
diff --git a/test/endtoend/operator/101_initial_cluster_backup_schedule.yaml b/test/endtoend/operator/101_initial_cluster_backup_schedule.yaml
index c1cf7737..8f3018b7 100644
--- a/test/endtoend/operator/101_initial_cluster_backup_schedule.yaml
+++ b/test/endtoend/operator/101_initial_cluster_backup_schedule.yaml
@@ -196,6 +196,7 @@ stringData:
     CREATE USER 'vt_dba'@'localhost';
     GRANT ALL ON *.* TO 'vt_dba'@'localhost';
     GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+    GRANT PROXY ON ''@'' TO 'vt_dba'@'localhost' WITH GRANT OPTION;
 
     # User for app traffic, with global read-write access.
     CREATE USER 'vt_app'@'localhost';
diff --git a/test/endtoend/operator/101_initial_cluster_unmanaged_tablet.yaml b/test/endtoend/operator/101_initial_cluster_unmanaged_tablet.yaml
index 57f1f699..77d6beef 100644
--- a/test/endtoend/operator/101_initial_cluster_unmanaged_tablet.yaml
+++ b/test/endtoend/operator/101_initial_cluster_unmanaged_tablet.yaml
@@ -153,6 +153,7 @@ stringData:
     CREATE USER 'vt_dba'@'localhost';
     GRANT ALL ON *.* TO 'vt_dba'@'localhost';
     GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+    GRANT PROXY ON ''@'' TO 'vt_dba'@'localhost' WITH GRANT OPTION;
 
     # User for app traffic, with global read-write access.
     CREATE USER 'vt_app'@'localhost';
diff --git a/test/endtoend/operator/101_initial_cluster_vtorc_vtadmin.yaml b/test/endtoend/operator/101_initial_cluster_vtorc_vtadmin.yaml
index e6ba0c5c..c3ef381b 100644
--- a/test/endtoend/operator/101_initial_cluster_vtorc_vtadmin.yaml
+++ b/test/endtoend/operator/101_initial_cluster_vtorc_vtadmin.yaml
@@ -172,6 +172,7 @@ stringData:
     CREATE USER 'vt_dba'@'localhost';
     GRANT ALL ON *.* TO 'vt_dba'@'localhost';
     GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+    GRANT PROXY ON ''@'' TO 'vt_dba'@'localhost' WITH GRANT OPTION;
 
     # User for app traffic, with global read-write access.
     CREATE USER 'vt_app'@'localhost';
diff --git a/test/endtoend/operator/operator-latest.yaml b/test/endtoend/operator/operator-latest.yaml
index b2e6122f..834bb153 100644
--- a/test/endtoend/operator/operator-latest.yaml
+++ b/test/endtoend/operator/operator-latest.yaml
@@ -1523,6 +1523,7 @@ spec:
                 enum:
                 - builtin
                 - xtrabackup
+                - mysqlshell
                 type: string
               locations:
                 items:
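
For reviewers, here is a minimal sketch of how a cluster would opt into the new engine once this merges. Only the mysqlshell value for spec.backup.engine is new in this patch; the locations/volume layout is the pre-existing backup API (shaped after the e2e backup fixture), and the cluster name is hypothetical:

# Hypothetical VitessCluster snippet enabling the new engine.
apiVersion: planetscale.com/v2
kind: VitessCluster
metadata:
  name: example                # illustrative name only
spec:
  backup:
    engine: mysqlshell         # new in this PR; previously builtin | xtrabackup
    locations:
    - volume:                  # existing backup-location API; any VolumeSource works
        hostPath:
          path: /backup
          type: Directory

Note that when this engine is selected, backup.go derives --mysql-shell-backup-location from the mount path of the first storage volume mount for the configured location (svm[0].MountPath), so the location is assumed to have a mountable volume. The GRANT PROXY addition to the test init_db.sql files appears to be needed so that MySQL Shell loads running as vt_dba can re-create users that carry proxy grants.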