From 76f7e08aa5cf2b85086d68638cfa8455cfbf1c31 Mon Sep 17 00:00:00 2001 From: Manan Gupta Date: Mon, 30 Dec 2024 12:14:58 +0530 Subject: [PATCH] feat: make all durability policy constants Signed-off-by: Manan Gupta --- go/cmd/vtctldclient/command/keyspaces.go | 7 +- go/test/endtoend/keyspace/keyspace_test.go | 7 +- .../reparent/emergencyreparent/ers_test.go | 27 +++---- .../reparent/newfeaturetest/reparent_test.go | 13 ++-- .../reparent/plannedreparent/reparent_test.go | 25 +++---- .../reparent/semisync/semi_sync_test.go | 3 +- go/test/endtoend/reparent/utils/utils.go | 3 +- .../transaction/benchmark/bench_test.go | 3 +- .../transaction/twopc/fuzz/main_test.go | 5 +- .../endtoend/transaction/twopc/main_test.go | 3 +- .../transaction/twopc/metric/main_test.go | 3 +- .../transaction/twopc/stress/main_test.go | 5 +- go/test/endtoend/transaction/tx_test.go | 3 +- .../primaryfailure/primary_failure_test.go | 9 +-- go/test/endtoend/vtorc/utils/utils.go | 3 +- go/vt/vtctl/grpcvtctldserver/server_test.go | 8 +-- go/vt/vtctl/reparentutil/durability.go | 27 +++++-- .../reparentutil/durability_funcs_test.go | 52 +++++++------- go/vt/vtctl/reparentutil/durability_test.go | 10 +-- .../reparentutil/emergency_reparenter_test.go | 70 +++++++++---------- .../planned_reparenter_flaky_test.go | 16 ++--- .../reparentutil/reparent_sorter_test.go | 2 +- go/vt/vtctl/reparentutil/replication_test.go | 30 ++++---- go/vt/vtctl/reparentutil/util_test.go | 4 +- go/vt/vtctl/vtctl.go | 5 +- go/vt/vtctld/api_test.go | 3 +- go/vt/vtorc/inst/analysis_dao_test.go | 69 +++++++++--------- go/vt/vtorc/inst/keyspace_dao_test.go | 6 +- .../logic/keyspace_shard_discovery_test.go | 15 ++-- .../testlib/emergency_reparent_shard_test.go | 4 +- .../testlib/planned_reparent_shard_test.go | 9 +-- go/vt/wrangler/testlib/reparent_utils_test.go | 4 +- 32 files changed, 244 insertions(+), 209 deletions(-) diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index 565e0c8aa82..5c5da619767 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/constants/sidecar" @@ -153,7 +154,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { var snapshotTime *vttime.Time if topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) == topodatapb.KeyspaceType_SNAPSHOT { - if createKeyspaceOptions.DurabilityPolicy != "none" { + if createKeyspaceOptions.DurabilityPolicy != reparentutil.DurabilityNone { return errors.New("--durability-policy cannot be specified while creating a snapshot keyspace") } @@ -409,7 +410,7 @@ func init() { CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") - CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. 
Possible values include 'semi_sync' and others as dictated by registered plugins.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") Root.AddCommand(CreateKeyspace) @@ -425,7 +426,7 @@ func init() { RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell beloning to the specified keyspace.") Root.AddCommand(RemoveKeyspaceCell) - SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.") + SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.") Root.AddCommand(SetKeyspaceDurabilityPolicy) ValidateSchemaKeyspace.Flags().BoolVar(&validateSchemaKeyspaceOptions.IncludeViews, "include-views", false, "Includes views in compared schemas.") diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index f65301b9bb4..ee7e072a010 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/vtctl/reparentutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -142,18 +143,18 @@ func TestDurabilityPolicyField(t *testing.T) { out, err := vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "ks_durability", "--durability-policy=semi_sync") require.NoError(t, err, out) - checkDurabilityPolicy(t, "semi_sync") + checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync) out, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", "ks_durability", "--durability-policy=none") require.NoError(t, err, out) - checkDurabilityPolicy(t, "none") + checkDurabilityPolicy(t, reparentutil.DurabilityNone) out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) - checkDurabilityPolicy(t, "semi_sync") + checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 0d2eb8935d2..8eabf6013dd 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -28,10 
+28,11 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) func TestTrivialERS(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -55,7 +56,7 @@ func TestTrivialERS(t *testing.T) { } func TestReparentIgnoreReplicas(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -96,7 +97,7 @@ func TestReparentIgnoreReplicas(t *testing.T) { } func TestReparentDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -131,7 +132,7 @@ func TestReparentDownPrimary(t *testing.T) { } func TestReparentNoChoiceDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -166,7 +167,7 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) { func TestSemiSyncSetupCorrectly(t *testing.T) { t.Run("semi-sync enabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -193,7 +194,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { }) t.Run("semi-sync disabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -222,7 +223,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary func TestERSPromoteRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -249,7 +250,7 @@ func TestERSPromoteRdonly(t *testing.T) { // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set func TestERSPreventCrossCellPromotion(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -271,7 +272,7 @@ func TestERSPreventCrossCellPromotion(t *testing.T) { // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our 
promoted primary should have // caught up to it by pulling transactions from it func TestPullFromRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -342,7 +343,7 @@ func TestPullFromRdonly(t *testing.T) { // replicas which do not have any replication status and also succeeds if the io thread // is stopped on the primary elect. func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -441,7 +442,7 @@ func TestERSForInitialization(t *testing.T) { } func TestRecoverWithMultipleFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -468,7 +469,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) { // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs func TestERSFailFast(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -507,7 +508,7 @@ func TestERSFailFast(t *testing.T) { // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped. // If there are more than 1, we also fail. func TestReplicationStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index a041ca04c68..1b5af658cb3 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) // TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values @@ -36,7 +37,7 @@ import ( // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag. 
func TestRecoverWithMultipleVttabletFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -67,7 +68,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { // and ERS succeeds. func TestSingleReplicaERS(t *testing.T) { // Set up a cluster with none durability policy - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // Confirm that the replication is setup correctly in the beginning. @@ -102,7 +103,7 @@ func TestSingleReplicaERS(t *testing.T) { // TestTabletRestart tests that a running tablet can be restarted and everything is still fine func TestTabletRestart(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -114,7 +115,7 @@ func TestTabletRestart(t *testing.T) { // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded. func TestChangeTypeWithoutSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -159,7 +160,7 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { // TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a // write that happens when PromoteReplica is called. func TestERSWithWriteInPromoteReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -176,7 +177,7 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) { } func TestBufferingWithMultipleDisruptions(t *testing.T) { - clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupShardedReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) // Stop all VTOrc instances, so that they don't interfere with the test. 
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 94e37d715f4..0d2c211395b 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -33,10 +33,11 @@ import ( "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -47,7 +48,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -60,7 +61,7 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -82,7 +83,7 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -108,7 +109,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -125,7 +126,7 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.DeleteTablet(t, clusterInstance, tablets[2]) @@ -172,13 +173,13 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -277,7 +278,7 @@ func reparentFromOutside(t *testing.T, 
clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -323,7 +324,7 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -389,7 +390,7 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "cross_cell") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityCrossCell) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -428,7 +429,7 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index df9bf192e65..c04e39463bc 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) func TestSemiSyncUpgradeDowngrade(t *testing.T) { @@ -33,7 +34,7 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) { if ver != 21 { t.Skip("We only want to run this test for v21 release") } - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 2a51262557b..8a120d1c971 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -32,6 +32,7 @@ import ( "github.com/stretchr/testify/require" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vttablet/tabletconn" "vitess.io/vitess/go/mysql" @@ -71,7 +72,7 @@ func SetupReparentCluster(t *testing.T, durability string) *cluster.LocalProcess // SetupRangeBasedCluster sets up the range based cluster func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalProcessCluster { - return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, 
"semi_sync") + return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, reparentutil.DurabilitySemiSync) } // SetupShardedReparentCluster is used to setup a sharded cluster for testing diff --git a/go/test/endtoend/transaction/benchmark/bench_test.go b/go/test/endtoend/transaction/benchmark/bench_test.go index a42c9bca9c1..891ffa3d7b3 100644 --- a/go/test/endtoend/transaction/benchmark/bench_test.go +++ b/go/test/endtoend/transaction/benchmark/bench_test.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) var ( @@ -65,7 +66,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 1, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go index 4d168fbdde0..72f238056ab 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/main_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) var ( @@ -78,7 +79,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -89,7 +90,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go index 3607beea72a..7c82fcccceb 100644 --- a/go/test/endtoend/transaction/twopc/main_test.go +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -32,6 +32,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -92,7 +93,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go index 61a43017ef9..c38a4002c08 100644 --- a/go/test/endtoend/transaction/twopc/metric/main_test.go +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) var ( @@ -78,7 +79,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - 
DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go index 4da4f86bdff..9c831cecf0d 100644 --- a/go/test/endtoend/transaction/twopc/stress/main_test.go +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) var ( @@ -80,7 +81,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -91,7 +92,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/tx_test.go b/go/test/endtoend/transaction/tx_test.go index 89531952b13..a30fcdb062d 100644 --- a/go/test/endtoend/transaction/tx_test.go +++ b/go/test/endtoend/transaction/tx_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) var ( @@ -69,7 +70,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 9017d35a8c5..5842c0434de 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/logic" ) @@ -44,7 +45,7 @@ func TestDownPrimary(t *testing.T) { // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, "semi_sync") + }, 1, reparentutil.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -115,7 +116,7 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. 
func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, reparentutil.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] curPrimary := shard0.Vttablets[0] @@ -170,7 +171,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, reparentutil.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -241,7 +242,7 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, "semi_sync") + }, 1, reparentutil.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 456d55518dd..573927cdcb3 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -40,6 +40,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" // Register topo implementations. 
_ "vitess.io/vitess/go/vt/topo/consultopo" @@ -299,7 +300,7 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep } if durability == "" { - durability = "none" + durability = reparentutil.DurabilityNone } out, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) require.NoError(t, err, out) diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 93f302a1097..bfea278e245 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -2777,14 +2777,14 @@ func TestCreateKeyspace(t *testing.T) { req: &vtctldatapb.CreateKeyspaceRequest{ Name: "testkeyspace", Type: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, }, expected: &vtctldatapb.CreateKeyspaceResponse{ Keyspace: &vtctldatapb.Keyspace{ Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, }, }, }, @@ -11338,11 +11338,11 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { }, req: &vtctldatapb.SetKeyspaceDurabilityPolicyRequest{ Keyspace: "ks1", - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, }, expected: &vtctldatapb.SetKeyspaceDurabilityPolicyResponse{ Keyspace: &topodatapb.Keyspace{ - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, }, }, }, diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go index 29a5b2e712a..31a110bdbd8 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/durability.go @@ -37,32 +37,47 @@ var ( durabilityPolicies = make(map[string]NewDurabler) ) +const ( + // DurabilityNone is the name of the durability policy that has no semi-sync setup. + DurabilityNone = "none" + // DurabilitySemiSync is the name of the durability policy that has 1 semi-sync setup. + DurabilitySemiSync = "semi_sync" + // DurabilityCrossCell is the name of the durability policy that has 1 semi-sync setup but only allows Primary and Replica type servers from a different cell to acknowledge semi sync. + DurabilityCrossCell = "cross_cell" + // DurabilitySemiSyncWithRdonlyAck is the name of the durability policy that has 1 semi-sync setup and allows the "rdonly" to send semi-sync acks as well. + DurabilitySemiSyncWithRdonlyAck = "semi_sync_with_rdonly_ack" + // DurabilityCrossCellWithRdonlyAck is the name of the durability policy that has 1 semi-sync setup but only allows Primary and Replica type servers from a different cell to acknowledge semi sync. It also allows the "rdonly" to send semi-sync acks. + DurabilityCrossCellWithRdonlyAck = "cross_cell_with_rdonly_ack" + // DurabilityTest is the name of the durability policy that has no semi-sync setup but overrides the type for a specific tablet to prefer. It is only meant to be used for testing purposes! 
+ DurabilityTest = "test" +) + func init() { // register all the durability rules with their functions to create them - RegisterDurability("none", func() Durabler { + RegisterDurability(DurabilityNone, func() Durabler { return &durabilityNone{} }) - RegisterDurability("semi_sync", func() Durabler { + RegisterDurability(DurabilitySemiSync, func() Durabler { return &durabilitySemiSync{ rdonlySemiSync: false, } }) - RegisterDurability("cross_cell", func() Durabler { + RegisterDurability(DurabilityCrossCell, func() Durabler { return &durabilityCrossCell{ rdonlySemiSync: false, } }) - RegisterDurability("semi_sync_with_rdonly_ack", func() Durabler { + RegisterDurability(DurabilitySemiSyncWithRdonlyAck, func() Durabler { return &durabilitySemiSync{ rdonlySemiSync: true, } }) - RegisterDurability("cross_cell_with_rdonly_ack", func() Durabler { + RegisterDurability(DurabilityCrossCellWithRdonlyAck, func() Durabler { return &durabilityCrossCell{ rdonlySemiSync: true, } }) - RegisterDurability("test", func() Durabler { + RegisterDurability(DurabilityTest, func() Durabler { return &durabilityTest{} }) } diff --git a/go/vt/vtctl/reparentutil/durability_funcs_test.go b/go/vt/vtctl/reparentutil/durability_funcs_test.go index 21eb308a4b0..546ae47df20 100644 --- a/go/vt/vtctl/reparentutil/durability_funcs_test.go +++ b/go/vt/vtctl/reparentutil/durability_funcs_test.go @@ -73,25 +73,25 @@ func TestSemiSyncAckersForPrimary(t *testing.T) { }{ { name: "no other tablets", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet}, wantSemiSyncAckers: nil, }, { name: "'none' durability policy", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: nil, }, { name: "'semi_sync' durability policy", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet}, }, { name: "'cross_cell' durability policy", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaCrossCellTablet}, @@ -118,7 +118,7 @@ func Test_haveRevokedForTablet(t *testing.T) { }{ { name: "'none' durability policy - not revoked", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -129,7 +129,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'none' durability policy - revoked", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -140,7 +140,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: 
[]*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -151,7 +151,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -162,7 +162,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -173,7 +173,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -184,7 +184,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - primary in list", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -215,7 +215,7 @@ func Test_haveRevoked(t *testing.T) { }{ { name: "'none' durability policy - all tablets revoked", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -225,7 +225,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - all tablets revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -235,7 +235,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - all tablets revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -245,7 +245,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'none' durability policy - revoked", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, }, @@ -255,7 +255,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyTablet, }, @@ -265,7 +265,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ replicaCrossCellTablet, }, @@ -275,7 +275,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'none' 
durability policy - not revoked", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -285,7 +285,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -295,7 +295,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -325,7 +325,7 @@ func Test_canEstablishForTablet(t *testing.T) { }{ { name: "primary not reached", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -333,7 +333,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -341,7 +341,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -349,7 +349,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "established", - durabilityPolicy: "none", + durabilityPolicy: DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, @@ -357,7 +357,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, @@ -365,7 +365,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go index 5745da64f7e..52480362be6 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/durability_test.go @@ -29,7 +29,7 @@ import ( ) func TestDurabilityNone(t *testing.T) { - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) promoteRule := PromotionRule(durability, &topodatapb.Tablet{ @@ -78,10 +78,10 @@ func TestDurabilitySemiSync(t *testing.T) { rdonlySemiSync bool }{ { - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, rdonlySemiSync: false, }, { - durabilityPolicy: "semi_sync_with_rdonly_ack", + durabilityPolicy: DurabilitySemiSyncWithRdonlyAck, rdonlySemiSync: true, }, } @@ -176,10 +176,10 @@ func TestDurabilityCrossCell(t *testing.T) { rdonlySemiSync bool }{ { - durabilityPolicy: 
"cross_cell", + durabilityPolicy: DurabilityCrossCell, rdonlySemiSync: false, }, { - durabilityPolicy: "cross_cell_with_rdonly_ack", + durabilityPolicy: DurabilityCrossCellWithRdonlyAck, rdonlySemiSync: true, }, } diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 3669c34dc11..0129397b415 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -129,7 +129,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ { name: "success", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -238,7 +238,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success - 1 replica and 1 rdonly failure", - durability: "semi_sync", + durability: DurabilitySemiSync, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -372,7 +372,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { // Here, all our tablets are tied, so we're going to explicitly pick // zone1-101. name: "success with requested primary-elect", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 101, @@ -483,7 +483,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success with existing primary", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { @@ -594,7 +594,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard not found", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, unlockTopo: true, // we shouldn't try to lock the nonexistent shard @@ -607,7 +607,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot stop replication", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -666,7 +666,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "lost topo lock", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -725,7 +725,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot get reparent candidates", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -799,7 +799,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "zero valid reparent candidates", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, shards: []*vtctldatapb.Shard{ @@ -816,7 +816,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "error waiting for relay logs to apply", - durability: "none", + 
durability: DurabilityNone, // one replica is going to take a minute to apply relay logs emergencyReparentOps: EmergencyReparentOptions{ WaitReplicasTimeout: time.Millisecond * 50, @@ -911,7 +911,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not in tablet map", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 200, @@ -1001,7 +1001,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not winning primary-elect", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication Cell: "zone1", Uid: 102, @@ -1124,7 +1124,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot promote new primary", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 102, @@ -1237,7 +1237,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "promotion-rule - no valid candidates for emergency reparent", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1344,7 +1344,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary - must not promotion rule", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1456,7 +1456,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cross cell - no valid candidates", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1575,7 +1575,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary in a different cell", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ PreventCrossCellPromotion: true, NewPrimaryAlias: &topodatapb.TabletAlias{ @@ -1700,7 +1700,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary cannot make progress", - durability: "cross_cell", + durability: DurabilityCrossCell, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1815,7 +1815,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "expected primary mismatch", - durability: "none", + durability: DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ ExpectedPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -2333,7 +2333,7 @@ func TestEmergencyReparenter_promotionOfNewPrimary(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3021,7 +3021,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) for _, test := 
range tests { t.Run(test.name, func(t *testing.T) { @@ -3502,7 +3502,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.remoteOpTimeout != 0 { @@ -4092,7 +4092,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4336,7 +4336,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) test.emergencyReparentOps.durability = durability logger := logutil.NewMemoryLogger() @@ -4355,7 +4355,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { // TestParentContextCancelled tests that even if the parent context of reparentReplicas cancels, we should not cancel the context of // SetReplicationSource since there could be tablets that are running it even after ERS completes. func TestParentContextCancelled(t *testing.T) { - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) // Setup ERS options with a very high wait replicas timeout emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} @@ -4486,28 +4486,28 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { }{ { name: "filter must not", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet}, }, { name: "host taking backup must not be on the list when there are other candidates", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "host taking backup must be the only one on the list when there are no other candidates", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaTablet}, }, { name: "filter cross cell", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4523,14 +4523,14 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, }, { name: "filter establish", - durability: "cross_cell", + durability: DurabilityCrossCell, validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: nil, }, { name: "filter mixed", - durability: "cross_cell", + durability: 
DurabilityCrossCell, prevPrimary: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone-2", @@ -4545,7 +4545,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "error - requested primary must not", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4555,7 +4555,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-1-0000000003 has a must not promotion rule", }, { name: "error - requested primary not in same cell", - durability: "none", + durability: DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4567,7 +4567,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-2-0000000002 is is a different cell as the previous primary", }, { name: "error - requested primary cannot establish", - durability: "cross_cell", + durability: DurabilityCrossCell, validTablets: allTablets, tabletsTakingBackup: noTabletsTakingBackup, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, @@ -5525,7 +5525,7 @@ func TestEmergencyReparenterFindErrantGTIDs(t *testing.T) { slices.Sort(keys) require.ElementsMatch(t, tt.wantedCandidates, keys) - dp, err := GetDurabilityPolicy("semi_sync") + dp, err := GetDurabilityPolicy(DurabilitySemiSync) require.NoError(t, err) ers := EmergencyReparenter{logger: logutil.NewCallbackLogger(func(*logutilpb.Event) {})} winningPrimary, _, err := ers.findMostAdvanced(candidates, tt.tabletMap, EmergencyReparentOptions{durability: dp}) diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index 8e2ee8f9df7..b0432d666c4 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -1186,7 +1186,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.opts.durability == nil { - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) tt.opts.durability = durability } @@ -1799,7 +1799,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) tt.opts.durability = durability @@ -1946,7 +1946,7 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) pos, err := pr.performInitialPromotion( ctx, @@ -3423,7 +3423,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }{ { name: "success - durability = none", - durability: "none", + durability: DurabilityNone, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3490,7 +3490,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, { name: "success - durability = semi_sync", - durability: "semi_sync", + durability: DurabilitySemiSync, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, 
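Reviewer note (illustrative, not part of the patch): every literal-to-constant swap above relies on exported names for the built-in durability policies. A minimal sketch of what those declarations amount to, with the string values inferred from the replaced literals in the surrounding hunks; the actual declarations in package reparentutil may differ in form:

// Illustrative only: exported constants for the built-in durability policy
// names, mirroring the raw string literals they replace in these tests.
const (
	DurabilityNone      = "none"
	DurabilitySemiSync  = "semi_sync"
	DurabilityCrossCell = "cross_cell"
	DurabilityTest      = "test"
)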
@@ -3556,7 +3556,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "success - promote replica required", - durability: "semi_sync", + durability: DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3632,7 +3632,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "Promote replica failed", - durability: "semi_sync", + durability: DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3977,7 +3977,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { t.Parallel() pr := NewPlannedReparenter(nil, tt.tmc, logger) - durabilityPolicy := "none" + durabilityPolicy := DurabilityNone if tt.durability != "" { durabilityPolicy = tt.durability } diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go index 87e7b253d54..3dfcdbb5228 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go @@ -135,7 +135,7 @@ func TestReparentSorter(t *testing.T) { }, } - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 1b36186efb8..8f867101376 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -289,7 +289,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ { name: "success", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -358,7 +358,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "success with wait for all tablets", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -428,7 +428,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "timing check with wait for all tablets", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -514,7 +514,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 2 rdonly failures", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -608,7 +608,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 1 rdonly and 1 replica failures", - durability: "semi_sync", + durability: DurabilitySemiSync, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -702,7 +702,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "ignore tablets", - durability: "none", + durability: 
DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -762,7 +762,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "have PRIMARY tablet and can demote", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -841,7 +841,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet is PRIMARY and cannot demote", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -906,7 +906,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets are PRIMARY and cannot demote", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -959,7 +959,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "stopReplicasTimeout exceeded", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000100": time.Minute, // zone1-0000000100 will timeout and not be included @@ -1023,7 +1023,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet fails to StopReplication", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1080,7 +1080,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets fail StopReplication", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1121,7 +1121,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: true, }, { name: "1 tablets fail StopReplication and 1 has replication stopped", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1166,7 +1166,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "slow tablet is the new primary requested", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000102": 1 * time.Second, // zone1-0000000102 is slow to respond but has to be included since it is the requested primary @@ -1268,7 +1268,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "Handle nil replication status After. 
No segfaulting when determining backup status, and fall back to Before status", - durability: "none", + durability: DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index ac44da8175a..c754013fb4f 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -1014,7 +1014,7 @@ zone1-0000000100 is not a replica`, }, } - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1829,7 +1829,7 @@ func Test_getTabletsWithPromotionRules(t *testing.T) { filteredTablets: nil, }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := GetDurabilityPolicy(DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { res := getTabletsWithPromotionRules(durability, tt.tablets, tt.rule) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 1a4735b1c82..5438c2dd1c3 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -100,6 +100,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/ptr" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/flagutil" @@ -1818,7 +1819,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags keyspaceType := subFlags.String("keyspace_type", "", "Specifies the type of the keyspace") baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") - durabilityPolicy := subFlags.String("durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + durabilityPolicy := subFlags.String("durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. 
Possible values include 'semi_sync' and others as dictated by registered plugins.") sidecarDBName := subFlags.String("sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") if err := subFlags.Parse(args); err != nil { return err @@ -1840,7 +1841,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags var snapshotTime *vttime.Time if ktype == topodatapb.KeyspaceType_SNAPSHOT { - if *durabilityPolicy != "none" { + if *durabilityPolicy != reparentutil.DurabilityNone { return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "durability-policy cannot be specified while creating a snapshot keyspace") } if *baseKeyspace == "" { diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index d8ac8beccc1..a62bffe1178 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/wrangler" @@ -53,7 +54,7 @@ func TestAPI(t *testing.T) { defer server.Close() ks1 := &topodatapb.Keyspace{ - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, SidecarDbName: "_vt_sidecar_ks1", } diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index c061d54ebb3..ba11ab9e3f2 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/test" ) @@ -70,7 +71,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -89,7 +90,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, ShardPrimaryTermTimestamp: "2022-12-28 07:23:25.129898+00:00", - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -107,7 +108,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 4, @@ -129,7 +130,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 0, CountReplicas: 0, IsPrimary: 1, @@ -149,7 +150,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 0, CountReplicas: 3, IsPrimary: 1, @@ -169,7 +170,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 2, @@ -191,7 +192,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: 
reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -212,7 +213,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -234,7 +235,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -256,7 +257,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -278,7 +279,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -315,7 +316,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -333,7 +334,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -355,7 +356,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -373,7 +374,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 102}, }, @@ -395,7 +396,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -413,7 +414,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -436,7 +437,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -454,7 +455,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -477,7 +478,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: 
reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -495,7 +496,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -520,7 +521,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -542,7 +543,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 0, @@ -562,7 +563,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -583,7 +584,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 1, @@ -605,7 +606,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { }, // Snapshot Keyspace KeyspaceType: 1, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -643,7 +644,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -663,7 +664,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, IsInvalid: 1, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -680,7 +681,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, IsInvalid: 1, }, { TabletInfo: &topodatapb.Tablet{ @@ -722,7 +723,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, IsInvalid: 1, }}, keyspaceWanted: "ks", @@ -740,7 +741,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -758,7 +759,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, @@ -781,7 +782,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: 
reparentutil.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -799,7 +800,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go index dda3ffaa9d2..a9d99b2ac80 100644 --- a/go/vt/vtorc/inst/keyspace_dao_test.go +++ b/go/vt/vtorc/inst/keyspace_dao_test.go @@ -48,7 +48,7 @@ func TestSaveAndReadKeyspace(t *testing.T) { keyspaceName: "ks1", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, }, keyspaceWanted: nil, semiSyncAckersWanted: 1, @@ -72,12 +72,12 @@ func TestSaveAndReadKeyspace(t *testing.T) { keyspaceName: "ks4", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, BaseKeyspace: "baseKeyspace", }, keyspaceWanted: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, }, semiSyncAckersWanted: 0, }, { diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go index 5cbe139728b..09d0535d132 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" @@ -36,15 +37,15 @@ import ( var ( keyspaceDurabilityNone = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, } keyspaceDurabilitySemiSync = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, } keyspaceDurabilityTest = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "test", + DurabilityPolicy: reparentutil.DurabilityTest, } keyspaceSnapshot = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, @@ -106,7 +107,7 @@ func TestRefreshAllKeyspaces(t *testing.T) { // Set clusters to watch to watch all keyspaces clustersToWatch = nil // Change the durability policy of ks1 - reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync") + reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", reparentutil.DurabilitySemiSync) require.NoError(t, RefreshAllKeyspacesAndShards(context.Background())) // Verify that all the keyspaces are correctly reloaded @@ -144,7 +145,7 @@ func TestRefreshKeyspace(t *testing.T) { keyspaceName: "ks1", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: reparentutil.DurabilitySemiSync, }, keyspaceWanted: nil, err: "", @@ -169,12 +170,12 @@ func TestRefreshKeyspace(t *testing.T) { keyspaceName: "ks4", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - 
DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, BaseKeyspace: "baseKeyspace", }, keyspaceWanted: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: reparentutil.DurabilityNone, }, err: "", }, { diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 3167be5e512..548b86a1f72 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -60,7 +60,7 @@ func TestEmergencyReparentShard(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{ @@ -211,7 +211,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) moreAdvancedReplica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.Replicating = true diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 1894c6bb4eb..feff3919685 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" @@ -60,7 +61,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell2", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -177,7 +178,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // new primary 
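Reviewer note (illustrative, not part of the patch): the test changes in this area follow the same lookup pattern seen earlier, where the exported constant is handed to GetDurabilityPolicy and the returned policy is threaded into the reparent options. A hedged sketch, with the helper name hypothetical and the error behaviour for an unregistered policy name assumed:

package example

import "vitess.io/vitess/go/vt/vtctl/reparentutil"

// resolveSemiSync is a hypothetical helper showing the lookup pattern used in
// the tests: pass the exported constant, check the error, keep the policy.
func resolveSemiSync() error {
	durability, err := reparentutil.GetDurabilityPolicy(reparentutil.DurabilitySemiSync)
	if err != nil {
		return err // assumed behaviour for an unregistered policy name
	}
	_ = durability // handed to the reparenter options in the tests above
	return nil
}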
newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -312,7 +313,7 @@ func TestPlannedReparentInitialization(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -691,7 +692,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { // Create a primary, a couple good replicas primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // old primary primary.FakeMysqlDaemon.ReadOnly = false diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index b199a64340a..35ae15a960f 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -141,7 +141,7 @@ func TestReparentTablet(t *testing.T) { } primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil) replica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // mark the primary inside the shard if _, err := ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error { @@ -197,7 +197,7 @@ func TestSetReplicationSource(t *testing.T) { require.NoError(t, err, "CreateShard failed") primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) // mark the primary inside the shard _, err = ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error {
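Reviewer note (illustrative, not part of the patch): on the command side, the same constant replaces the "none" literal in the snapshot-keyspace guard, so the check reads roughly as below. The helper name and wiring are hypothetical; the comparison and error text mirror the commandCreateKeyspace hunk earlier in this patch:

package example

import (
	"errors"

	"vitess.io/vitess/go/vt/vtctl/reparentutil"
)

// validateSnapshotDurability sketches the flag check from commandCreateKeyspace:
// a snapshot keyspace must keep the default policy, now spelled with the
// exported constant instead of the string "none".
func validateSnapshotDurability(isSnapshot bool, durabilityPolicy string) error {
	if isSnapshot && durabilityPolicy != reparentutil.DurabilityNone {
		return errors.New("durability-policy cannot be specified while creating a snapshot keyspace")
	}
	return nil
}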