From 0adb706b5799fb655d013104f9fffcccce5b4831 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Wed, 21 Feb 2024 15:32:35 -0500 Subject: [PATCH] [e2e] More vtctldclient updates in tests (#15276) Signed-off-by: Andrew Mason --- .../endtoend/cluster/vtctldclient_process.go | 51 ++++++++++++++++++ go/test/endtoend/keyspace/keyspace_test.go | 52 +++++++++--------- .../reparent/newfeaturetest/reparent_test.go | 6 +-- .../reparent/plannedreparent/reparent_test.go | 26 ++++----- go/test/endtoend/reparent/utils/utils.go | 53 ++++++++----------- .../endtoend/sharded/sharded_keyspace_test.go | 18 +++---- .../buffer/reparent/failover_buffer_test.go | 6 +-- .../buffer/reshard/sharded_buffer_test.go | 9 ++-- .../endtoend/tabletmanager/commands_test.go | 37 +++++++------ .../tabletmanager/custom_rule_topo_test.go | 2 +- .../tabletmanager/primary/tablet_test.go | 23 ++++---- .../tabletmanager/tablet_health_test.go | 43 +++++++-------- go/test/endtoend/tabletmanager/tablet_test.go | 6 +-- .../throttler_topo/throttler_test.go | 8 +-- go/test/endtoend/topoconncache/main_test.go | 12 ++--- .../topoconncache/topo_conn_cache_test.go | 4 +- go/test/endtoend/vault/vault_test.go | 2 +- .../endtoend/versionupgrade/upgrade_test.go | 2 +- .../vtgate/createdb_plugin/main_test.go | 4 +- .../endtoend/vtgate/foreignkey/main_test.go | 2 +- .../foreignkey/stress/fk_stress_test.go | 8 +-- go/test/endtoend/vtgate/gen4/main_test.go | 4 +- go/test/endtoend/vtgate/main_test.go | 4 +- .../queries/informationschema/main_test.go | 4 +- .../queries/lookup_queries/main_test.go | 2 +- .../reservedconn/reconnect1/main_test.go | 12 ++--- .../reservedconn/reconnect2/main_test.go | 4 +- .../reservedconn/reconnect3/main_test.go | 2 +- .../reservedconn/reconnect4/main_test.go | 2 +- go/test/endtoend/vtgate/schema/schema_test.go | 30 +++++------ .../sharded_prs/st_sharded_test.go | 2 +- .../tablet_healthcheck/reparent_test.go | 6 +-- .../correctness_test.go | 2 +- go/test/endtoend/vtorc/api/api_test.go | 2 +- 
go/test/endtoend/vtorc/general/vtorc_test.go | 14 ++--- .../primaryfailure/primary_failure_test.go | 14 ++--- go/test/endtoend/vtorc/utils/utils.go | 24 +++------ 37 files changed, 262 insertions(+), 240 deletions(-) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 959ab5a93b9..c5afd8f1220 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . @@ -97,6 +98,11 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str return vtctldclient } +// ApplyRoutingRules applies the given routing rules. +func (vtctldclient *VtctldClientProcess) ApplyRoutingRules(json string) error { + return vtctldclient.ExecuteCommand("ApplyRoutingRules", "--rules", json) +} + type ApplySchemaParams struct { DDLStrategy string MigrationContext string @@ -213,6 +219,51 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid return err } +// GetKeyspace executes the vtctldclient command to get a shard, and parses the response. +func (vtctldclient *VtctldClientProcess) GetKeyspace(keyspace string) (*vtctldatapb.Keyspace, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetKeyspace", keyspace) + if err != nil { + return nil, err + } + + var ks vtctldatapb.Keyspace + err = json2.Unmarshal([]byte(data), &ks) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse keyspace output: %s", data) + } + return &ks, nil +} + +// GetShard executes the vtctldclient command to get a shard, and parses the response. 
+func (vtctldclient *VtctldClientProcess) GetShard(keyspace string, shard string) (*vtctldatapb.Shard, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard)) + if err != nil { + return nil, err + } + + var si vtctldatapb.Shard + err = json2.Unmarshal([]byte(data), &si) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse shard output: %s", data) + } + return &si, nil +} + +// GetTablet executes vtctldclient command to get a tablet, and parses the response. +func (vtctldclient *VtctldClientProcess) GetTablet(alias string) (*topodatapb.Tablet, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetTablet", alias) + if err != nil { + return nil, err + } + + var tablet topodatapb.Tablet + err = json2.Unmarshal([]byte(data), &tablet) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse tablet output: %s", data) + } + return &tablet, nil +} + // OnlineDDLShowRecent responds with recent schema migration list func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) { return vtctldclient.ExecuteCommandWithOutput( diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 142e4b4b442..7f7d4198135 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -21,17 +21,18 @@ import ( "encoding/json" "flag" "os" - "strings" "testing" - "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/vt/key" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/key" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( @@ -41,7 +42,7 @@ var ( cell = "zone1" cell2 = "zone2" 
hostname = "localhost" - servedTypes = map[topodata.TabletType]bool{topodata.TabletType_PRIMARY: true, topodata.TabletType_REPLICA: true, topodata.TabletType_RDONLY: true} + servedTypes = map[topodatapb.TabletType]bool{topodatapb.TabletType_PRIMARY: true, topodatapb.TabletType_REPLICA: true, topodatapb.TabletType_RDONLY: true} sqlSchema = `create table vt_insert_test ( id bigint auto_increment, msg varchar(64), @@ -152,29 +153,31 @@ func TestDurabilityPolicyField(t *testing.T) { out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) - out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", "--durability-policy=semi_sync", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) checkDurabilityPolicy(t, "semi_sync") - out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) } func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) { - var keyspace topodata.Keyspace - out, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "ks_durability") - require.NoError(t, err, out) - err = json.Unmarshal([]byte(out), &keyspace) + ks, err := clusterForKSTest.VtctldClientProcess.GetKeyspace("ks_durability") require.NoError(t, err) - require.Equal(t, keyspace.DurabilityPolicy, durabilityPolicy) + require.Equal(t, ks.Keyspace.DurabilityPolicy, durabilityPolicy) } func TestGetSrvKeyspaceNames(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) + data, err := 
clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) require.Nil(t, err) - assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName) - assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName) + + var namesByCell = map[string]*vtctldatapb.GetSrvKeyspaceNamesResponse_NameList{} + err = json2.Unmarshal([]byte(data), &namesByCell) + require.NoError(t, err) + + assert.Contains(t, namesByCell[cell].Names, keyspaceUnshardedName) + assert.Contains(t, namesByCell[cell].Names, keyspaceShardedName) } func TestGetSrvKeyspacePartitions(t *testing.T) { @@ -210,7 +213,7 @@ func TestShardNames(t *testing.T) { defer cluster.PanicHandler(t) output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, keyspaceShardedName) require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace + var srvKeyspace topodatapb.SrvKeyspace err = json.Unmarshal([]byte(output), &srvKeyspace) require.Nil(t, err) @@ -218,12 +221,7 @@ func TestShardNames(t *testing.T) { func TestGetKeyspace(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", keyspaceUnshardedName) - require.Nil(t, err) - - var keyspace topodata.Keyspace - - err = json.Unmarshal([]byte(output), &keyspace) + _, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName) require.Nil(t, err) } @@ -390,7 +388,7 @@ func TestKeyspaceToShardName(t *testing.T) { // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { shardKIDs := shardKIdMap[shardRef.Name] for _, kid := range shardKIDs { @@ -405,7 +403,7 @@ func TestKeyspaceToShardName(t *testing.T) { srvKeyspace = getSrvKeyspace(t, cell, 
keyspaceUnshardedName) for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { assert.Equal(t, shardRef.Name, keyspaceUnshardedName) } @@ -420,10 +418,10 @@ func packKeyspaceID(keyspaceID uint64) []byte { return (keybytes[:]) } -func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodata.SrvKeyspace { +func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodatapb.SrvKeyspace { output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, ksname) require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace + var srvKeyspace topodatapb.SrvKeyspace err = json.Unmarshal([]byte(output), &srvKeyspace) require.Nil(t, err) diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index b570509f1a7..44cd89a306a 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -40,7 +40,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. 
- err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -139,10 +139,10 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { utils.CheckPrimaryTablet(t, clusterInstance, primary) // Change replica's type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) // Change tablets type from rdonly back to replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") require.NoError(t, err) } diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 6aa5972b928..07e488b52d0 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -44,7 +44,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // We cannot change a primary to spare - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") require.Error(t, err, out) require.Contains(t, out, "type change PRIMARY -> SPARE is not an allowed transition for ChangeTabletType") } @@ -92,7 +92,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { defer utils.TeardownCluster(clusterInstance) tablets := 
clusterInstance.Keyspaces[0].Shards[0].Vttablets - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") require.NoError(t, err) utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -258,13 +258,13 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus if downPrimary { err := tablets[0].VttabletProcess.TearDownWithTimeout(30 * time.Second) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", - "--allow_primary", tablets[0].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", + "--allow-primary", tablets[0].Alias) require.NoError(t, err) } // update topology with the new server - err := clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", + err := clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablets[1].Alias) require.NoError(t, err) @@ -318,7 +318,7 @@ func TestReparentWithDownReplica(t *testing.T) { // We have to StartReplication on tablets[2] since the MySQL instance is restarted and does not have replication running // We earlier used to rely on replicationManager to fix this but we have disabled it in our testing environment for latest versions of vttablet and vtctl. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) require.NoError(t, err) // wait until it gets the data @@ -338,9 +338,9 @@ func TestChangeTypeSemiSync(t *testing.T) { primary, replica, rdonly1, rdonly2 := tablets[0], tablets[1], tablets[2], tablets[3] // Updated rdonly tablet and set tablet type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") require.NoError(t, err) utils.ValidateTopology(t, clusterInstance, true) @@ -349,7 +349,7 @@ func TestChangeTypeSemiSync(t *testing.T) { // Stop replication on rdonly1, to make sure when we make it replica it doesn't start again. // Note we do a similar test for replica -> rdonly below. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) require.NoError(t, err) // Check semi-sync on replicas. @@ -364,27 +364,27 @@ func TestChangeTypeSemiSync(t *testing.T) { utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "OFF") // Change replica to rdonly while replicating, should turn off semi-sync, and restart replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) utils.CheckDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "OFF") utils.CheckDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "OFF") // Change rdonly1 to replica, should turn on semi-sync, and not start replication. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "ON") utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") utils.CheckReplicaStatus(ctx, t, rdonly1) // Now change from replica back to rdonly, make sure replication is still not enabled. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF") utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") utils.CheckReplicaStatus(ctx, t, rdonly1) // Change rdonly2 to replica, should turn on semi-sync, and restart replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "ON") utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON") diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 675648dcf37..790fd0028e2 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -34,7 +34,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/tabletconn" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -185,7 +184,7 @@ func setupShard(ctx context.Context, t *testing.T, clusterInstance *cluster.Loca } // Initialize shard - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) require.NoError(t, err) ValidateTopology(t, clusterInstance, true) @@ -306,21 +305,21 @@ func PrsAvoid(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *c // PrsWithTimeout runs PRS func PrsWithTimeout(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, avoid bool, actionTimeout, waitTimeout string) (string, error) { args := []string{ - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} + "PlannedReparentShard", + fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} if actionTimeout != "" { args = append(args, "--action_timeout", actionTimeout) } if waitTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitTimeout) + args = append(args, 
"--wait-replicas-timeout", waitTimeout) } if avoid { - args = append(args, "--avoid_tablet") + args = append(args, "--avoid-primary") } else { - args = append(args, "--new_primary") + args = append(args, "--new-primary") } args = append(args, tab.Alias) - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) return out, err } @@ -335,15 +334,15 @@ func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster. if timeout != "" { args = append(args, "--action_timeout", timeout) } - args = append(args, "EmergencyReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) + args = append(args, "EmergencyReparentShard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) if tab != nil { - args = append(args, "--new_primary", tab.Alias) + args = append(args, "--new-primary", tab.Alias) } if waitReplicasTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitReplicasTimeout) + args = append(args, "--wait-replicas-timeout", waitReplicasTimeout) } if preventCrossCellPromotion { - args = append(args, "--prevent_cross_cell_promotion=true") + args = append(args, "--prevent-cross-cell-promotion") } if len(tabletsToIgnore) != 0 { tabsString := "" @@ -354,9 +353,9 @@ func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster. tabsString = tabsString + "," + vttablet.Alias } } - args = append(args, "--ignore_replicas", tabsString) + args = append(args, "--ignore-replicas", tabsString) } - return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + return clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) 
} // ErsWithVtctl runs ERS via vtctl binary @@ -374,10 +373,10 @@ func ValidateTopology(t *testing.T, clusterInstance *cluster.LocalProcessCluster args := []string{"Validate"} if pingTablets { - args = append(args, "--", "--ping-tablets=true") + args = append(args, "--ping-tablets") } - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) - require.Empty(t, out) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) + require.Contains(t, out, "no issues found") require.NoError(t, err) } @@ -398,17 +397,14 @@ func ConfirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu // ConfirmOldPrimaryIsHangingAround confirms that the old primary is hanging around func ConfirmOldPrimaryIsHangingAround(t *testing.T, clusterInstance *cluster.LocalProcessCluster) { - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") require.Error(t, err) require.Contains(t, out, "already has primary") } // CheckPrimaryTablet makes sure the tablet type is primary, and its health check agrees. func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletInfo.GetType()) @@ -424,10 +420,7 @@ func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessClust // isHealthyPrimaryTablet will return if tablet is primary AND healthy. 
func isHealthyPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) bool { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.Nil(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.Nil(t, err) if tabletInfo.GetType() != topodatapb.TabletType_PRIMARY { return false @@ -541,9 +534,9 @@ func ResurrectTablet(ctx context.Context, t *testing.T, clusterInstance *cluster // DeleteTablet is used to delete the given tablet func DeleteTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand( - "DeleteTablet", "--", - "--allow_primary", + err := clusterInstance.VtctldClientProcess.ExecuteCommand( + "DeleteTablets", + "--allow-primary", tab.Alias) require.NoError(t, err) } @@ -629,7 +622,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces // make sure the primary health stream says it's the primary too // (health check is disabled on these servers, force it first) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) require.NoError(t, err) shrs, err := clusterInstance.StreamTabletHealth(context.Background(), tablet, 1) diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 857dc455206..f311404ad7e 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -108,9 +108,9 @@ func TestShardedKeyspace(t *testing.T) { shard1Primary := shard1.Vttablets[0] shard2Primary := shard2.Vttablets[0] - err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, 
shard1.Name, cell, shard1Primary.TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) require.Nil(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -125,7 +125,7 @@ func TestShardedKeyspace(t *testing.T) { _, err = shard2Primary.VttabletProcess.QueryTablet(sqlSchemaReverse, keyspaceName, true) require.Nil(t, err) - if err = clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { + if err = clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { log.Error(err.Error()) return } @@ -136,13 +136,13 @@ func TestShardedKeyspace(t *testing.T) { shard2Primary.Alias, shard2.Vttablets[1].Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard1Primary.Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard2Primary.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard1Primary.Alias, "true") + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard2Primary.Alias, "true") _, _ = shard1Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (1, 'test 1')", keyspaceName, true) _, _ = shard2Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (10, 'test 10')", keyspaceName, true) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err) rows, err := shard1Primary.VttabletProcess.QueryTablet("select id, msg from vt_select_test order by id", keyspaceName, true) @@ -164,9 +164,9 @@ func 
TestShardedKeyspace(t *testing.T) { assert.Contains(t, output, shard1Primary.Alias+": CREATE TABLE") assert.Contains(t, output, shard2Primary.Alias+": CREATE TABLE") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) require.Nil(t, err) err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidatePermissionsShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) @@ -184,7 +184,7 @@ func TestShardedKeyspace(t *testing.T) { func reloadSchemas(t *testing.T, aliases ...string) { for _, alias := range aliases { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { assert.Fail(t, "Unable to reload schema") } diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go index 2be57120050..486dc3ef9e5 100644 --- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go @@ -83,7 +83,7 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro require.NoError(t, err) // Notify the new vttablet primary about the reparent. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) } @@ -92,9 +92,9 @@ func failoverPlannedReparenting(t *testing.T, clusterInstance *cluster.LocalProc reads.ExpectQueries(10) writes.ExpectQueries(10) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceUnshardedName, "0"), - "--new_primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) + "--new-primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go index ae922108012..d58d8901165 100644 --- a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go @@ -68,9 +68,8 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false) require.NoError(t, err) workflowName := "buf2buf" - workflow := fmt.Sprintf("%s.%s", keyspaceName, "buf2buf") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards", "0", "--target_shards", "-80,80-", "Create", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "Create", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--source-shards", "0", "--target-shards", "-80,80-") require.NoError(t, err) // Execute the resharding operation @@ -78,13 +77,13 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp writes.ExpectQueries(25) waitForLowLag(t, 
clusterInstance, keyspaceName, workflowName) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly,replica", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=rdonly,replica") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=primary") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "Complete", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--target-keyspace", keyspaceName, "--workflow", workflowName, "Complete") require.NoError(t, err) } diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 1a2d2424cb4..537a3b9d0fc 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -62,52 +62,52 @@ func TestTabletCommands(t *testing.T) { // make sure direct dba queries work sql := "select * from t1" - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", "--json", primaryTablet.Alias, sql) + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", "--json", primaryTablet.Alias, sql) require.Nil(t, err) assertExecuteFetch(t, result) // check Ping / RefreshState / RefreshStateByShard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", "--", "--cells="+cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", "--cells", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") // Check basic actions. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadOnly", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "false") require.Nil(t, err, "error should be Nil") qr := utils.Exec(t, conn, "show variables like 'read_only'") got := fmt.Sprintf("%v", qr.Rows) want := "[[VARCHAR(\"read_only\") VARCHAR(\"ON\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.Nil(t, err, "error should be Nil") qr = utils.Exec(t, conn, "show variables like 'read_only'") got = fmt.Sprintf("%v", qr.Rows) want = "[[VARCHAR(\"read_only\") VARCHAR(\"OFF\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets=true") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err, "error should 
be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", "--", "--ping-tablets=true", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", "--ping-tablets", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=false", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets=false", keyspaceShard) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=true", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets", keyspaceShard) require.Nil(t, err, "error should be Nil") } @@ -142,14 +142,13 @@ func assertExecuteFetch(t *testing.T, qr string) { // ActionAndTimeout test func TestActionAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("Sleep", primaryTablet.Alias, "5s") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("SleepTablet", primaryTablet.Alias, "5s") require.Nil(t, err) time.Sleep(1 * time.Second) // try a frontend RefreshState that should timeout as the tablet is busy running the other one - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", "--", primaryTablet.Alias, "--wait-time", "2s") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias, "--action_timeout", "2s") assert.Error(t, err, "timeout as tablet is in Sleep") } @@ -207,14 +206,14 @@ func TestShardReplicationFix(t *testing.T) { assertNodeCount(t, result, int(3)) // Manually add a bogus entry
to the replication graph, and check it is removed by ShardReplicationFix - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) require.Nil(t, err, "error should be Nil") result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") assertNodeCount(t, result, int(4)) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") @@ -224,7 +223,7 @@ func TestShardReplicationFix(t *testing.T) { func TestGetSchema(t *testing.T) { defer cluster.PanicHandler(t) - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", "--", + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", fmt.Sprintf("%s-%d", clusterInstance.Cell, primaryTablet.TabletUID)) require.Nil(t, err) diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index aa09a99e0fe..0c6e056af36 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -74,7 +74,7 @@ func TestTopoCustomRule(t *testing.T) { err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = 
clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") // And wait until the query is working. diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index f6255b1f71a..297e5540fac 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -121,16 +120,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -138,16 +137,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { checkTabletType(t, replicaTablet.Alias, "PRIMARY") // Come back to the original tablet. 
- err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -162,7 +161,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { // See StreamHealthResponse.primary_term_start_timestamp for details. 
// Make replica as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING") @@ -212,7 +211,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { streamHealthRes2.GetPrimaryTermStartTimestamp())) // Reset primary - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) err = primaryTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) @@ -232,11 +231,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 7dc4bcd97d2..c8eb43b682f 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -30,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" @@ -85,7 +84,7 @@ func TestTabletReshuffle(t *testing.T) { require.NoError(t, err) assertExcludeFields(t, string(result)) - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", rTablet.Alias) assert.Error(t, err, "cannot perform backup without my.cnf") killTablets(rTablet) @@ -114,7 +113,7 @@ func TestHealthCheck(t *testing.T) { require.NoError(t, err) defer conn.Close() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) @@ -123,9 +122,9 @@ func TestHealthCheck(t *testing.T) { utils.Exec(t, conn, "stop slave") // stop replication, make sure we don't go unhealthy. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) // make sure the health stream is updated @@ -136,9 +135,9 @@ func TestHealthCheck(t *testing.T) { } // then restart replication, make sure we stay healthy - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) @@ -173,16 +172,16 @@ func TestHealthCheck(t *testing.T) { // On a MySQL restart, it comes up as a read-only tablet (check default.cnf file). 
// We have to explicitly set it to read-write otherwise heartbeat writer is unable // to write the heartbeats - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.NoError(t, err) // explicitly start replication on all of the replicas to avoid any test flakiness as they were all // replicating from the primary instance - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) time.Sleep(tabletHealthcheckRefreshInterval) @@ -348,11 +347,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() @@ -398,16 +393,16 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // Change from rdonly to drained and stop replication. The tablet will stay // healthy, and the query service is still running. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") require.NoError(t, err) // Trying to drain the same tablet again, should error - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") assert.Error(t, err, "already drained") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) require.NoError(t, err) // Trigger healthcheck explicitly to avoid waiting for the next interval. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkTabletType(t, rdonlyTablet.Alias, "DRAINED") @@ -417,11 +412,11 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { require.NoError(t, err) // Restart replication. Tablet will become healthy again. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkHealth(t, rdonlyTablet.HTTPPort, false) } @@ -434,7 +429,7 @@ func killTablets(tablets ...*cluster.Vttablet) { defer wg.Done() _ = tablet.VttabletProcess.TearDown() _ = tablet.MysqlctlProcess.Stop() - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) }(tablet) } wg.Wait() diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 6212b4a418b..830502268d1 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -47,7 +47,7 @@ func TestEnsureDB(t *testing.T) { require.NoError(t, err) // Make it the primary. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.EqualError(t, err, "exit status 1") // It is still NOT_SERVING because the db is read-only. @@ -56,8 +56,8 @@ func TestEnsureDB(t *testing.T) { assert.Contains(t, status, "read-only") // Switch to read-write and verify that we go serving. 
- // Note: for TabletExternallyReparented, we expect SetReadWrite to be called by the user - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + // Note: for TabletExternallyReparented, we expect SetWritable to be called by the user + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) err = tablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 7c0f05bdcc2..9824f28ae2b 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -370,7 +370,7 @@ func TestLag(t *testing.T) { defer clusterInstance.EnableVTOrcRecoveries(t) t.Run("stopping replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { @@ -415,7 +415,7 @@ func TestLag(t *testing.T) { }) t.Run("starting replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) { @@ -439,7 +439,7 @@ func TestLag(t *testing.T) { func TestNoReplicas(t *testing.T) { defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") + err := 
clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) // This makes no REPLICA servers available. We expect something like: @@ -447,7 +447,7 @@ func TestNoReplicas(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("restoring to REPLICA", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") assert.NoError(t, err) waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 4c17481ec84..26eb3918a0b 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -193,14 +193,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -211,7 +211,7 @@ func TestMain(m *testing.M) { } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := 
clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -219,14 +219,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 504ca218047..4ffcc309e29 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -74,7 +74,7 @@ func deleteCell(t *testing.T) { deleteTablet(t, shard2Rdonly) // Delete cell2 info from topo - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--", "--force", cell2) + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--force", cell2) t.Log(res) require.NoError(t, err) @@ -111,7 +111,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/vault/vault_test.go 
b/go/test/endtoend/vault/vault_test.go index 684a374707d..f8e19c07a0c 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -283,7 +283,7 @@ func initializeClusterLate(t *testing.T) { tablet.MysqlctlProcess.ExtraArgs = append(tablet.MysqlctlProcess.ExtraArgs, mysqlctlArg...) } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 87f7f9e8675..181b5dfc9ad 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -148,7 +148,7 @@ func TestDeploySchema(t *testing.T) { { sqlQuery := fmt.Sprintf(createTable, tableName) - result, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ""}) + result, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ""}) require.Nil(t, err, result) } for i := range clusterInstance.Keyspaces[0].Shards { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index e712fee7b36..5bfec3890b5 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -164,8 +164,8 @@ func shutdown(t *testing.T, ksName string) { } require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", ksName)) + clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", ksName)) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph")) 
+ clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph")) } diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index 483c1d05e80..63590df9247 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -139,7 +139,7 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 60eff73b820..341db836fd8 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -597,7 +597,7 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { artifacts := textutil.SplitDelimitedList(row.AsString("artifacts", "")) for _, artifact := range artifacts { t.Run(artifact, func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) require.NoError(t, err) }) } @@ -781,7 +781,7 @@ func createInitialSchema(t *testing.T, tcase *testCase) { t.Run("dropping tables", func(t *testing.T) { for _, tableName := range reverseTableNames { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) require.NoError(t, err) } }) @@ -808,7 +808,7 @@ func createInitialSchema(t *testing.T, tcase *testCase) { } b.WriteString(";") } - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, b.String()) + err := 
clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, b.String()) require.NoError(t, err) }) if tcase.preStatement != "" { @@ -862,7 +862,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 378b2d2969e..4c94e8e2ec8 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -102,12 +102,12 @@ func TestMain(m *testing.M) { } // apply routing rules - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 12abcf4dd01..b276508f269 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -79,12 +79,12 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - _, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") if err != nil { return 1 } diff --git 
a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 06c5b188d18..3696617281e 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -78,12 +78,12 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index c385941502a..9486dc194ff 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -74,7 +74,7 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 11325a0f2f8..491ce6bc6ab 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -133,7 +133,7 @@ func TestServingChange(t *testing.T) { // changing rdonly tablet to spare (non serving). 
rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -143,12 +143,12 @@ func TestServingChange(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present @@ -174,7 +174,7 @@ func TestServingChangeStreaming(t *testing.T) { // changing rdonly tablet to spare (non serving). rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -192,12 +192,12 @@ func TestServingChangeStreaming(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. 
replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index b66bb15dbd5..a448574c282 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -129,7 +129,7 @@ func TestTabletChange(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. @@ -150,7 +150,7 @@ func TestTabletChangeStreaming(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. 
diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 25af85acc00..677c24666b2 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -102,7 +102,7 @@ func TestMysqlDownServingChange(t *testing.T) { require.NoError(t, primaryTablet.MysqlctlProcess.Stop()) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. _ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index 28367cd597a..1dc53a89506 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -104,7 +104,7 @@ func TestVttabletDownServingChange(t *testing.T) { // kill vttablet process _ = primaryTablet.VttabletProcess.TearDown() require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. 
_ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 04d91d8d978..14b6c13034e 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -120,7 +120,7 @@ func testWithInitialSchema(t *testing.T) { var sqlQuery = "" // nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -135,7 +135,7 @@ func testWithInitialSchema(t *testing.T) { // testWithAlterSchema if we alter schema and then apply, the resultant schema should match across shards func testWithAlterSchema(t *testing.T) { sqlQuery := fmt.Sprintf(alterTable, fmt.Sprintf("vt_select_test_%02d", 3), "msg") - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) matchSchema(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, clusterInstance.Keyspaces[0].Shards[1].Vttablets[0].VttabletProcess.TabletPath) } @@ -143,7 +143,7 @@ func testWithAlterSchema(t *testing.T) { // testWithAlterDatabase tests that ALTER DATABASE is accepted by the validator. 
func testWithAlterDatabase(t *testing.T) { sql := "create database alter_database_test; alter database alter_database_test default character set = utf8mb4; drop database alter_database_test" - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sql) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sql) assert.NoError(t, err) } @@ -157,7 +157,7 @@ func testWithAlterDatabase(t *testing.T) { // See: https://github.com/vitessio/vitess/issues/1731#issuecomment-222914389 func testWithDropCreateSchema(t *testing.T) { dropCreateTable := fmt.Sprintf("DROP TABLE vt_select_test_%02d ;", 2) + fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", 2)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropCreateTable) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropCreateTable) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -186,10 +186,10 @@ func testWithAutoSchemaFromChangeDir(t *testing.T) { // matchSchema schema for supplied tablets should match func matchSchema(t *testing.T, firstTablet string, secondTablet string) { - firstShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) + firstShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) require.Nil(t, err) - secondShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) + secondShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) require.Nil(t, err) assert.Equal(t, firstShardSchema, secondShardSchema) @@ -203,12 +203,12 @@ func matchSchema(t *testing.T, firstTablet string, secondTablet string) { // is the MySQL behavior the user expects. 
func testDropNonExistentTables(t *testing.T) { dropNonExistentTable := "DROP TABLE nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", dropNonExistentTable, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", dropNonExistentTable, keyspaceName) require.Error(t, err) assert.True(t, strings.Contains(output, "Unknown table")) dropIfExists := "DROP TABLE IF EXISTS nonexistent_table;" - err = clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropIfExists) + err = clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropIfExists) require.Nil(t, err) checkTables(t, totalTableCount) @@ -219,7 +219,7 @@ func testDropNonExistentTables(t *testing.T) { func testCreateInvalidView(t *testing.T) { for _, ddlStrategy := range []string{"direct", "direct -allow-zero-in-date"} { createInvalidView := "CREATE OR REPLACE VIEW invalid_view AS SELECT * FROM nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) require.Error(t, err) assert.Contains(t, output, "doesn't exist (errno 1146)") } @@ -228,25 +228,25 @@ func testCreateInvalidView(t *testing.T) { func testApplySchemaBatch(t *testing.T) { { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", 
"--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -291,7 +291,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { source := fmt.Sprintf("%s/0", keyspaceName) tabletAlias := clusterInstance.Keyspaces[0].Shards[shard].Vttablets[0].VttabletProcess.TabletPath - schema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", tabletAlias) + schema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", 
tabletAlias) require.Nil(t, err) resultMap := make(map[string]any) @@ -305,7 +305,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { // (The different charset won't be corrected on the destination shard // because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail if // there are differences in the options e.g. the character set.) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", "--", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") require.Nil(t, err) output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CopySchemaShard", source, fmt.Sprintf("%s/%d", keyspaceName, shard)) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 6ff8e69bb52..09bd97eb9fe 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -181,7 +181,7 @@ func TestMain(m *testing.M) { // This is supposed to change the primary tablet in the shards, meaning that a different tablet // will be responsible for sending schema tracking updates. 
for _, shard := range clusterInstance.Keyspaces[0].Shards { - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) if err != nil { fmt.Println(err) return 1 diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index dbc46bdda77..d6357ce8f2a 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -145,11 +145,11 @@ func TestHealthCheckExternallyReparentNewTablet(t *testing.T) { tablet := addTablet(t, reparentTabletUID, reparentTabletType) // promote the new tablet to the primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.NoError(t, err) // update the new primary tablet to be read-write - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) // wait for the vtgate to finish updating the new primary tablet @@ -236,7 +236,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 9386c307a12..50529d9fdf9 100644 --- 
a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -234,7 +234,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.Nil(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 7dd5c50eefa..7b277fd7a0f 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -106,7 +106,7 @@ func TestAPIEndpoints(t *testing.T) { t.Run("Replication Analysis API", func(t *testing.T) { // use vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // We know VTOrc won't fix this since we disabled global recoveries! 
diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index f7deffe20b3..d79e2964f3e 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -163,7 +163,7 @@ func TestVTOrcRepairs(t *testing.T) { t.Run("StopReplication", func(t *testing.T) { // use vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // check replication is setup correctly @@ -300,7 +300,7 @@ func TestRepairAfterTER(t *testing.T) { } // TER to other tablet - _, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) + _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) utils.CheckReplication(t, clusterInfo, newPrimary, []*cluster.Vttablet{curPrimary}, 15*time.Second) @@ -404,11 +404,11 @@ func TestVTOrcWithPrs(t *testing.T) { // check that the replication is setup correctly before we failover utils.CheckReplication(t, clusterInfo, curPrimary, shard0.Vttablets, 10*time.Second) - output, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), - "--wait_replicas_timeout", "31s", - "--new_primary", replica.Alias) + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), + "--wait-replicas-timeout", "31s", + "--new-primary", replica.Alias) require.NoError(t, err, "error in PlannedReparentShard output - %s", output) time.Sleep(40 * time.Second) diff --git 
a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index e226e8d13ae..d91dadddcb4 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -113,7 +113,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { curPrimary := shard0.Vttablets[0] // Promote the first tablet as the primary - err := clusterInfo.ClusterInstance.VtctlclientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) + err := clusterInfo.ClusterInstance.VtctldClientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) require.NoError(t, err) // find the replica and rdonly tablets @@ -442,9 +442,9 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica and rdonly. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly.Alias) require.NoError(t, err) // check that aheadRdonly is able to replicate. We also want to add some queries to aheadRdonly which will not be there in replica and rdonly @@ -669,7 +669,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the crossCellReplica. 
- err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) require.NoError(t, err) // check that rdonly and replica are able to replicate. We also want to add some queries to replica which will not be there in crossCellReplica @@ -679,7 +679,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the crossCellReplica. - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) require.NoError(t, err) // enable recoveries back on vtorc so that it can repair @@ -750,7 +750,7 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) // check that rdonly and crossCellReplica are able to replicate. We also want to add some queries to crossCenterReplica which will not be there in replica @@ -760,7 +760,7 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the replica. 
- err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replica.Alias) require.NoError(t, err) // enable recoveries back on vtorc so that it can repair diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 11294319658..dca2c7b1e26 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -206,7 +205,7 @@ func shutdownVttablets(clusterInfo *VTOrcClusterInfo) error { // Remove the tablet record for this tablet } // Ignoring error here because some tests delete tablets themselves. - _ = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) + _ = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", vttablet.Alias) } clusterInfo.ClusterInstance.Keyspaces[0].Shards[0].Vttablets = nil return nil @@ -352,19 +351,16 @@ func ShardPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, keyspace *c if now.Sub(start) > time.Second*60 { assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace.Name, shard.Name)) - assert.Nil(t, err) + si, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetShard(keyspace.Name, shard.Name) + require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - assert.Nil(t, err) - if shardInfo.PrimaryAlias == nil { + if si.Shard.PrimaryAlias == nil { log.Warningf("Shard %v/%v has no primary yet, sleep for 1 second\n", keyspace.Name, shard.Name) 
time.Sleep(time.Second) continue } for _, tablet := range shard.Vttablets { - if tablet.Alias == topoproto.TabletAliasString(shardInfo.PrimaryAlias) { + if tablet.Alias == topoproto.TabletAliasString(si.Shard.PrimaryAlias) { return tablet } } @@ -381,12 +377,8 @@ func CheckPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *clu //log.Exitf("error") assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) - if topodatapb.TabletType_PRIMARY != tabletInfo.GetType() { log.Warningf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias) time.Sleep(time.Second) @@ -535,9 +527,9 @@ func validateTopology(t *testing.T, clusterInfo *VTOrcClusterInfo, pingTablets b var err error var output string if pingTablets { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate", "--", "--ping-tablets=true") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate", "--ping-tablets") } else { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") } if err != nil { log.Warningf("Validate failed, retrying, output - %s", output)