Skip to content

Commit

Permalink
[e2e] More vtctldclient updates in tests (#15276)
Browse files Browse the repository at this point in the history
Signed-off-by: Andrew Mason <[email protected]>
  • Loading branch information
Andrew Mason authored Feb 21, 2024
1 parent e163c9e commit 0adb706
Show file tree
Hide file tree
Showing 37 changed files with 262 additions and 240 deletions.
51 changes: 51 additions & 0 deletions go/test/endtoend/cluster/vtctldclient_process.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"vitess.io/vitess/go/vt/vterrors"

topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)

// VtctldClientProcess is a generic handle for a running vtctldclient command.
Expand Down Expand Up @@ -97,6 +98,11 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str
return vtctldclient
}

// ApplyRoutingRules pushes the given routing rules (as a JSON document)
// to the cluster via the "ApplyRoutingRules" vtctldclient command.
func (vtctldclient *VtctldClientProcess) ApplyRoutingRules(json string) error {
	err := vtctldclient.ExecuteCommand("ApplyRoutingRules", "--rules", json)
	return err
}

type ApplySchemaParams struct {
DDLStrategy string
MigrationContext string
Expand Down Expand Up @@ -213,6 +219,51 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid
return err
}

// GetKeyspace executes the vtctldclient command to get a keyspace, and parses the response.
func (vtctldclient *VtctldClientProcess) GetKeyspace(keyspace string) (*vtctldatapb.Keyspace, error) {
	data, err := vtctldclient.ExecuteCommandWithOutput("GetKeyspace", keyspace)
	if err != nil {
		return nil, err
	}

	var ks vtctldatapb.Keyspace
	// The command prints the keyspace as JSON on stdout; unmarshal it into the proto.
	err = json2.Unmarshal([]byte(data), &ks)
	if err != nil {
		return nil, vterrors.Wrapf(err, "failed to parse keyspace output: %s", data)
	}
	return &ks, nil
}

// GetShard executes the vtctldclient command to get a shard, and parses the response.
func (vtctldclient *VtctldClientProcess) GetShard(keyspace string, shard string) (*vtctldatapb.Shard, error) {
	output, err := vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard))
	if err != nil {
		return nil, err
	}

	// The command prints the shard record as JSON; decode it into the proto.
	shardInfo := &vtctldatapb.Shard{}
	if err := json2.Unmarshal([]byte(output), shardInfo); err != nil {
		return nil, vterrors.Wrapf(err, "failed to parse shard output: %s", output)
	}
	return shardInfo, nil
}

// GetTablet executes vtctldclient command to get a tablet, and parses the response.
func (vtctldclient *VtctldClientProcess) GetTablet(alias string) (*topodatapb.Tablet, error) {
	output, err := vtctldclient.ExecuteCommandWithOutput("GetTablet", alias)
	if err != nil {
		return nil, err
	}

	// The command prints the tablet record as JSON; decode it into the proto.
	tab := &topodatapb.Tablet{}
	if err := json2.Unmarshal([]byte(output), tab); err != nil {
		return nil, vterrors.Wrapf(err, "failed to parse tablet output: %s", output)
	}
	return tab, nil
}

// OnlineDDLShowRecent responds with recent schema migration list
func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) {
return vtctldclient.ExecuteCommandWithOutput(
Expand Down
52 changes: 25 additions & 27 deletions go/test/endtoend/keyspace/keyspace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,18 @@ import (
"encoding/json"
"flag"
"os"
"strings"
"testing"

"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/vt/key"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/key"

topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)

var (
Expand All @@ -41,7 +42,7 @@ var (
cell = "zone1"
cell2 = "zone2"
hostname = "localhost"
servedTypes = map[topodata.TabletType]bool{topodata.TabletType_PRIMARY: true, topodata.TabletType_REPLICA: true, topodata.TabletType_RDONLY: true}
servedTypes = map[topodatapb.TabletType]bool{topodatapb.TabletType_PRIMARY: true, topodatapb.TabletType_REPLICA: true, topodatapb.TabletType_RDONLY: true}
sqlSchema = `create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
Expand Down Expand Up @@ -152,29 +153,31 @@ func TestDurabilityPolicyField(t *testing.T) {
out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability")
require.NoError(t, err, out)

out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", "--durability-policy=semi_sync", "ks_durability")
out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability")
require.NoError(t, err, out)
checkDurabilityPolicy(t, "semi_sync")

out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability")
out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability")
require.NoError(t, err, out)
}

func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
var keyspace topodata.Keyspace
out, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "ks_durability")
require.NoError(t, err, out)
err = json.Unmarshal([]byte(out), &keyspace)
ks, err := clusterForKSTest.VtctldClientProcess.GetKeyspace("ks_durability")
require.NoError(t, err)
require.Equal(t, keyspace.DurabilityPolicy, durabilityPolicy)
require.Equal(t, ks.Keyspace.DurabilityPolicy, durabilityPolicy)
}

func TestGetSrvKeyspaceNames(t *testing.T) {
defer cluster.PanicHandler(t)
output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
require.Nil(t, err)
assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName)
assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName)

var namesByCell = map[string]*vtctldatapb.GetSrvKeyspaceNamesResponse_NameList{}
err = json2.Unmarshal([]byte(data), &namesByCell)
require.NoError(t, err)

assert.Contains(t, namesByCell[cell].Names, keyspaceUnshardedName)
assert.Contains(t, namesByCell[cell].Names, keyspaceShardedName)
}

func TestGetSrvKeyspacePartitions(t *testing.T) {
Expand Down Expand Up @@ -210,20 +213,15 @@ func TestShardNames(t *testing.T) {
defer cluster.PanicHandler(t)
output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, keyspaceShardedName)
require.Nil(t, err)
var srvKeyspace topodata.SrvKeyspace
var srvKeyspace topodatapb.SrvKeyspace

err = json.Unmarshal([]byte(output), &srvKeyspace)
require.Nil(t, err)
}

func TestGetKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", keyspaceUnshardedName)
require.Nil(t, err)

var keyspace topodata.Keyspace

err = json.Unmarshal([]byte(output), &keyspace)
_, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName)
require.Nil(t, err)
}

Expand Down Expand Up @@ -390,7 +388,7 @@ func TestKeyspaceToShardName(t *testing.T) {

// for each served type PRIMARY REPLICA RDONLY, the shard ref count should match
for _, partition := range srvKeyspace.Partitions {
if partition.ServedType == topodata.TabletType_PRIMARY {
if partition.ServedType == topodatapb.TabletType_PRIMARY {
for _, shardRef := range partition.ShardReferences {
shardKIDs := shardKIdMap[shardRef.Name]
for _, kid := range shardKIDs {
Expand All @@ -405,7 +403,7 @@ func TestKeyspaceToShardName(t *testing.T) {
srvKeyspace = getSrvKeyspace(t, cell, keyspaceUnshardedName)

for _, partition := range srvKeyspace.Partitions {
if partition.ServedType == topodata.TabletType_PRIMARY {
if partition.ServedType == topodatapb.TabletType_PRIMARY {
for _, shardRef := range partition.ShardReferences {
assert.Equal(t, shardRef.Name, keyspaceUnshardedName)
}
Expand All @@ -420,10 +418,10 @@ func packKeyspaceID(keyspaceID uint64) []byte {
return (keybytes[:])
}

func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodata.SrvKeyspace {
func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodatapb.SrvKeyspace {
output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, ksname)
require.Nil(t, err)
var srvKeyspace topodata.SrvKeyspace
var srvKeyspace topodatapb.SrvKeyspace

err = json.Unmarshal([]byte(output), &srvKeyspace)
require.Nil(t, err)
Expand Down
6 changes: 3 additions & 3 deletions go/test/endtoend/reparent/newfeaturetest/reparent_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

// make tablets[1] a rdonly tablet.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
require.NoError(t, err)

// Confirm that replication is still working as intended
Expand Down Expand Up @@ -139,10 +139,10 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) {
utils.CheckPrimaryTablet(t, clusterInstance, primary)

// Change replica's type to rdonly
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly")
err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly")
require.NoError(t, err)

// Change tablets type from rdonly back to replica
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica")
require.NoError(t, err)
}
26 changes: 13 additions & 13 deletions go/test/endtoend/reparent/plannedreparent/reparent_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

// We cannot change a primary to spare
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare")
out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare")
require.Error(t, err, out)
require.Contains(t, out, "type change PRIMARY -> SPARE is not an allowed transition for ChangeTabletType")
}
Expand Down Expand Up @@ -92,7 +92,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) {
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained")
err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained")
require.NoError(t, err)

utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
Expand Down Expand Up @@ -258,13 +258,13 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus
if downPrimary {
err := tablets[0].VttabletProcess.TearDownWithTimeout(30 * time.Second)
require.NoError(t, err)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--",
"--allow_primary", tablets[0].Alias)
err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets",
"--allow-primary", tablets[0].Alias)
require.NoError(t, err)
}

// update topology with the new server
err := clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented",
err := clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented",
tablets[1].Alias)
require.NoError(t, err)

Expand Down Expand Up @@ -318,7 +318,7 @@ func TestReparentWithDownReplica(t *testing.T) {

// We have to StartReplication on tablets[2] since the MySQL instance is restarted and does not have replication running
// We earlier used to rely on replicationManager to fix this but we have disabled it in our testing environment for latest versions of vttablet and vtctl.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias)
err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias)
require.NoError(t, err)

// wait until it gets the data
Expand All @@ -338,9 +338,9 @@ func TestChangeTypeSemiSync(t *testing.T) {
primary, replica, rdonly1, rdonly2 := tablets[0], tablets[1], tablets[2], tablets[3]

// Updated rdonly tablet and set tablet type to rdonly
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly")
err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly")
require.NoError(t, err)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly")
require.NoError(t, err)

utils.ValidateTopology(t, clusterInstance, true)
Expand All @@ -349,7 +349,7 @@ func TestChangeTypeSemiSync(t *testing.T) {

// Stop replication on rdonly1, to make sure when we make it replica it doesn't start again.
// Note we do a similar test for replica -> rdonly below.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly1.Alias)
err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly1.Alias)
require.NoError(t, err)

// Check semi-sync on replicas.
Expand All @@ -364,27 +364,27 @@ func TestChangeTypeSemiSync(t *testing.T) {
utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "OFF")

// Change replica to rdonly while replicating, should turn off semi-sync, and restart replication.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly")
require.NoError(t, err)
utils.CheckDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "OFF")
utils.CheckDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "OFF")

// Change rdonly1 to replica, should turn on semi-sync, and not start replication.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica")
require.NoError(t, err)
utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "ON")
utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF")
utils.CheckReplicaStatus(ctx, t, rdonly1)

// Now change from replica back to rdonly, make sure replication is still not enabled.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly")
require.NoError(t, err)
utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF")
utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF")
utils.CheckReplicaStatus(ctx, t, rdonly1)

// Change rdonly2 to replica, should turn on semi-sync, and restart replication.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica")
err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica")
require.NoError(t, err)
utils.CheckDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "ON")
utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON")
Expand Down
Loading

0 comments on commit 0adb706

Please sign in to comment.