From 454c0698a1f32e506bc227e43f254672a48312d2 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Fri, 8 Dec 2023 18:41:48 +0100 Subject: [PATCH 01/21] Simplify VitessCluster constructor Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 67 ++++++++++++++----- go/test/endtoend/vreplication/fk_ext_test.go | 9 +-- go/test/endtoend/vreplication/fk_test.go | 6 +- .../endtoend/vreplication/materialize_test.go | 10 ++- go/test/endtoend/vreplication/migrate_test.go | 26 +++---- .../vreplication/movetables_buffering_test.go | 2 +- .../partial_movetables_seq_test.go | 5 +- .../vreplication/partial_movetables_test.go | 2 +- .../endtoend/vreplication/performance_test.go | 7 +- .../resharding_workflows_v2_test.go | 14 ++-- .../endtoend/vreplication/sidecardb_test.go | 11 +-- .../endtoend/vreplication/time_zone_test.go | 6 +- go/test/endtoend/vreplication/vdiff2_test.go | 7 +- .../vdiff_multiple_movetables_test.go | 7 +- .../vreplication/vreplication_test.go | 30 ++++----- .../vreplication/vschema_load_test.go | 8 +-- go/test/endtoend/vreplication/vstream_test.go | 15 ++--- 17 files changed, 115 insertions(+), 117 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 8993f1257da..32c639486cf 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -92,6 +92,7 @@ type VitessCluster struct { t *testing.T ClusterConfig *ClusterConfig Name string + CellNames []string Cells map[string]*Cell Topo *cluster.TopoProcess Vtctld *cluster.VtctldProcess @@ -332,9 +333,28 @@ func init() { externalClusterConfig = getClusterConfig(1, mainVtDataRoot+"/ext") } +type clusterOptions struct { + cells []string + clusterConfig *ClusterConfig +} + +func getClusterOptions(opts *clusterOptions) *clusterOptions { + if opts == nil { + opts = &clusterOptions{} + } + if opts.cells == nil { + opts.cells = []string{"zone1"} + } + if opts.clusterConfig == nil 
{ + opts.clusterConfig = mainClusterConfig + } + return opts +} + // NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo -func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { - vc := &VitessCluster{t: t, Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} +func NewVitessCluster(t *testing.T, opts *clusterOptions) *VitessCluster { + opts = getClusterOptions(opts) + vc := &VitessCluster{t: t, Name: t.Name(), CellNames: opts.cells, Cells: make(map[string]*Cell), ClusterConfig: opts.clusterConfig} require.NotNil(t, vc) vc.CleanupDataroot(t, true) @@ -346,32 +366,45 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf err := topo.ManageTopoDir("mkdir", "/vitess/global") require.NoError(t, err) vc.Topo = topo - for _, cellName := range cellNames { + for _, cellName := range opts.cells { err := topo.ManageTopoDir("mkdir", "/vitess/"+cellName) require.NoError(t, err) } - vtctld := cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, + vc.setupVtctld() + vc.setupVtctl() + vc.setupVtctlClient() + vc.setupVtctldClient() + return vc +} + +func (vc *VitessCluster) setupVtctld() { + vc.Vtctld = cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir) - vc.Vtctld = vtctld - require.NotNil(t, vc.Vtctld) + require.NotNil(vc.t, vc.Vtctld) // use first cell as `-cell` - vc.Vtctld.Setup(cellNames[0], extraVtctldArgs...) + vc.Vtctld.Setup(vc.CellNames[0], extraVtctldArgs...) 
+} +func (vc *VitessCluster) setupVtctl() { vc.Vtctl = cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname) - require.NotNil(t, vc.Vtctl) - for _, cellName := range cellNames { + require.NotNil(vc.t, vc.Vtctl) + for _, cellName := range vc.CellNames { vc.Vtctl.AddCellInfo(cellName) - cell, err := vc.AddCell(t, cellName) - require.NoError(t, err) - require.NotNil(t, cell) + cell, err := vc.AddCell(vc.t, cellName) + require.NoError(vc.t, err) + require.NotNil(vc.t, cell) } +} +func (vc *VitessCluster) setupVtctlClient() { vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctlClient) + require.NotNil(vc.t, vc.VtctlClient) +} + +func (vc *VitessCluster) setupVtctldClient() { vc.VtctldClient = cluster.VtctldClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctldClient) - return vc + require.NotNil(vc.t, vc.VtctldClient) } // CleanupDataroot deletes the vtdataroot directory. 
Since we run multiple tests sequentially in a single CI test shard, @@ -762,7 +795,7 @@ func (vc *VitessCluster) teardown() { } // TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown(t *testing.T) { +func (vc *VitessCluster) TearDown() { if debugMode { return } @@ -779,7 +812,7 @@ func (vc *VitessCluster) TearDown(t *testing.T) { } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) - vc.CleanupDataroot(t, false) + vc.CleanupDataroot(vc.t, false) } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index a06fafb257e..44f7a1d7f14 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -97,15 +97,16 @@ func TestFKExt(t *testing.T) { cellName := fkextConfig.cell cells := []string{cellName} - vc = NewVitessCluster(t, t.Name(), cells, fkextConfig.ClusterConfig) - - require.NotNil(t, vc) + vc = NewVitessCluster(t, &clusterOptions{ + cells: cells, + clusterConfig: fkextConfig.ClusterConfig, + }) allCellNames = cellName defaultCellName := cellName defaultCell = vc.Cells[defaultCellName] cell := vc.Cells[cellName] - defer vc.TearDown(t) + defer vc.TearDown() sourceKeyspace := fkextConfig.sourceKeyspaceName vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, "0", FKExtSourceVSchema, FKExtSourceSchema, 0, 0, 100, nil) diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 7d5f01c13db..56747bd6ef0 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -49,17 +49,15 @@ func TestFKWorkflow(t *testing.T) { defer func() { extraVTTabletArgs = nil }() cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, "TestFKWorkflow", cells, 
mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc) allCellNames = cellName defaultCellName := cellName defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "fksource" shardName := "0" - defer vc.TearDown(t) + defer vc.TearDown() cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index 63205a56c0a..0d437308fb2 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -64,9 +64,8 @@ const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters func testShardedMaterialize(t *testing.T, useVtctldClient bool) { defaultCellName := "zone1" - allCells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) ks1 := "ks1" ks2 := "ks2" shard := "0" @@ -74,7 +73,7 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) @@ -183,9 +182,8 @@ RETURN id * length(val); func testMaterialize(t *testing.T, useVtctldClient bool) { defaultCellName := "zone1" - allCells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMaterialize", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) sourceKs := "source" targetKs := "target" shard := "0" @@ -193,7 +191,7 @@ func 
testMaterialize(t *testing.T, useVtctldClient bool) { defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 75ab6a3151b..3b3bc8ebd9e 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -49,14 +49,12 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctlMigrate(t *testing.T) { defaultCellName := "zone1" - cells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) @@ -73,10 +71,8 @@ func TestVtctlMigrate(t *testing.T) { // create external cluster extCell := "extcell1" - extCells := []string{extCell} - extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{cells: []string{"extcell1"}, clusterConfig: externalClusterConfig}) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 
1000, nil) @@ -176,14 +172,12 @@ func TestVtctlMigrate(t *testing.T) { // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctldMigrate(t *testing.T) { defaultCellName := "zone1" - cells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrateVtctld", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", @@ -202,9 +196,11 @@ func TestVtctldMigrate(t *testing.T) { // create external cluster extCell := "extcell1" extCells := []string{extCell} - extVc := NewVitessCluster(t, t.Name(), extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{ + cells: extCells, + clusterConfig: externalClusterConfig, + }) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 113587a1669..b16cd7c4cc3 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -15,7 +15,7 @@ func TestMoveTablesBuffering(t *testing.T) { defaultRdonly = 1 vc = setupMinimalCluster(t) defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() currentWorkflowType = wrangler.MoveTablesWorkflow setupMinimalCustomerKeyspace(t) diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index f8dc440b62d..348d2865654 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ 
b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -169,13 +169,12 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa func (tc *vrepTestCase) teardown() { tc.vtgateConn.Close() - vc.TearDown(tc.t) + vc.TearDown() } func (tc *vrepTestCase) setupCluster() { - cells := []string{"zone1"} - tc.vc = NewVitessCluster(tc.t, tc.testName, cells, mainClusterConfig) + tc.vc = NewVitessCluster(tc.t, nil) vc = tc.vc // for backward compatibility since vc is used globally in this package require.NotNil(tc.t, tc.vc) tc.setupKeyspaces([]string{"commerce", "seqSrc"}) diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index d9573b50e4a..af36b2584dc 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -104,7 +104,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { }() vc = setupMinimalCluster(t) defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() setupMinimalCustomerKeyspace(t) // Move customer table from unsharded product keyspace to diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index 9e0ae797e72..a8f3470049d 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -50,13 +50,10 @@ create table customer(cid int, name varbinary(128), meta json default null, typ const sourceKs = "stress_src" const targetKs = "stress_tgt" - allCells := []string{defaultCellName} allCellNames = defaultCellName - vc = NewVitessCluster(t, "TestReplicationStress", allCells, mainClusterConfig) - require.NotNil(t, vc) - - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100, nil) diff 
--git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 401147a3887..bebbf280f54 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -297,7 +297,7 @@ func TestBasicV2Workflows(t *testing.T) { vc = setupCluster(t) defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() // Internal tables like the lifecycle ones for OnlineDDL should be ignored ddlSQL := "ALTER TABLE customer MODIFY cid bigint UNSIGNED" @@ -608,10 +608,9 @@ func testRestOfWorkflow(t *testing.T) { } func setupCluster(t *testing.T) *VitessCluster { - cells := []string{"zone1", "zone2"} + vc = NewVitessCluster(t, &clusterOptions{cells: []string{"zone1", "zone2"}}) + defer vc.TearDown() - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) defaultCellName := "zone1" allCellNames = defaultCellName defaultCell = vc.Cells[defaultCellName] @@ -672,10 +671,9 @@ func setupCustomer2Keyspace(t *testing.T) { } func setupMinimalCluster(t *testing.T) *VitessCluster { - cells := []string{"zone1"} + vc = NewVitessCluster(t, nil) + vc.TearDown() - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) defaultCellName := "zone1" allCellNames = defaultCellName defaultCell = vc.Cells[defaultCellName] @@ -715,7 +713,7 @@ func setupMinimalCustomerKeyspace(t *testing.T) { func TestSwitchReadsWritesInAnyOrder(t *testing.T) { vc = setupCluster(t) - defer vc.TearDown(t) + defer vc.TearDown() moveCustomerTableSwitchFlows(t, []*Cell{vc.Cells["zone1"]}, "zone1") } diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index ef05e051be2..81e79f98b54 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -58,15 +58,8 @@ 
func prs(t *testing.T, keyspace, shard string) { // TestSidecarDB launches a Vitess cluster and ensures that the expected sidecar tables are created. We also drop/alter // tables and ensure the next tablet init will recreate the sidecar database to the desired schema. func TestSidecarDB(t *testing.T) { - cells := []string{"zone1"} - - vc = NewVitessCluster(t, "TestSidecarDB", cells, mainClusterConfig) - require.NotNil(t, vc) - allCellNames = "zone1" - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() keyspace := "product" shard := "0" diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2d0d1eeaf0b..2da2ad0e3a2 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -41,13 +41,11 @@ func TestMoveTablesTZ(t *testing.T) { ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) - vc = NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", []string{"zone1"}, mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] cells := []*Cell{defaultCell} - defer vc.TearDown(t) - cell1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index d7b8cb6a47e..69afd1a99ba 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -120,15 +120,14 @@ func TestVDiff2(t *testing.T) { // This forces us to use multiple vstream packets even with small test tables. 
extraVTTabletArgs = []string{"--vstream_packet_size=1"} - vc = NewVitessCluster(t, "TestVDiff2", strings.Split(allCellNames, ","), mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, &clusterOptions{cells: strings.Split(allCellNames, ",")}) + defer vc.TearDown() + zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] zone3 := vc.Cells["zone3"] defaultCell = zone1 - defer vc.TearDown(t) - // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 434ea6db3e0..007f51c0f7d 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -33,9 +33,8 @@ import ( ) func TestMultipleConcurrentVDiffs(t *testing.T) { - cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, t.Name(), cells, mainClusterConfig) + cellName := "zone1" + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) allCellNames = cellName @@ -44,7 +43,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { sourceKeyspace := "product" shardName := "0" - defer vc.TearDown(t) + defer vc.TearDown() cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index abbdfe7f4dd..e3984ccb646 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -128,8 +128,8 @@ func TestVReplicationDDLHandling(t *testing.T) { newColumn := "ddltest" cell := "zone1" shard := "0" 
- vc = NewVitessCluster(t, t.Name(), []string{cell}, mainClusterConfig) - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() defaultCell = vc.Cells[cell] if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { @@ -234,8 +234,8 @@ func TestVreplicationCopyThrottling(t *testing.T) { cell := "zone1" table := "customer" shard := "0" - vc = NewVitessCluster(t, "TestVreplicationCopyThrottling", []string{cell}, mainClusterConfig) - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() defaultCell = vc.Cells[cell] // To test vstreamer source throttling for the MoveTables operation maxSourceTrxHistory := int64(5) @@ -306,9 +306,8 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { defaultCellName := "zone1" - allCells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) // Keep the cluster processes minimal to deal with CI resource constraints @@ -320,7 +319,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) } - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) @@ -432,15 +431,13 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} allCellNames = strings.Join(cells, ",") - vc = NewVitessCluster(t, "TestMultiCellVreplicationWorkflow", cells, 
mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, &clusterOptions{cells: cells}) + defer vc.TearDown() defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] keyspace := "product" shard := "0" - defer vc.TearDown(t) - cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) @@ -463,13 +460,12 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { func TestVStreamFlushBinlog(t *testing.T) { defaultCellName := "zone1" - allCells := []string{defaultCellName} allCellNames = defaultCellName workflow := "test_vstream_p2c" shard := "0" - vc = NewVitessCluster(t, "TestVStreamBinlogFlush", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] // Keep the cluster processes minimal (no rdonly and no replica tablets) @@ -637,8 +633,9 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { mainClusterConfig.vreplicationCompressGTID = false extraVTTabletArgs = oldVTTabletExtraArgs }() - vc = NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, &clusterOptions{cells: cells}) + defer vc.TearDown() + allCellNames = "zone1,zone2" defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] @@ -647,7 +644,6 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) - defer vc.TearDown(t) cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index a5cac4c68f8..8f66df82090 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ 
b/go/test/endtoend/vreplication/vschema_load_test.go @@ -41,13 +41,9 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { extendedTimeout := defaultTimeout * 4 defaultCellName := "zone1" - allCells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVSchemaChanges", allCells, mainClusterConfig) - - require.NotNil(t, vc) - - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 5c5e6a80130..745de5987d5 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -44,14 +44,13 @@ import ( // - We stream only from the primary and while streaming we reparent to a replica and then back to the original primary func testVStreamWithFailover(t *testing.T, failover bool) { defaultCellName := "zone1" - cells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVStreamWithFailover", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) defaultReplicas = 2 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) @@ -236,15 +235,14 @@ type numEvents struct { // tests the StopOnReshard flag func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID int) *numEvents { defaultCellName := "zone1" - allCells := []string{"zone1"} allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVStreamStopOnReshard", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run 
this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) @@ -387,15 +385,14 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents { defaultCellName := "zone1" allCellNames = defaultCellName - allCells := []string{allCellNames} - vc = NewVitessCluster(t, "VStreamCopyMultiKeyspaceReshard", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) ogdr := defaultReplicas defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func(dr int) { defaultReplicas = dr }(ogdr) - defer vc.TearDown(t) + defer vc.TearDown() defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) From 6b6593ad4449b539bf0f9d3d6964502767b6a5b6 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Mon, 11 Dec 2023 14:17:49 +0100 Subject: [PATCH 02/21] Fix FK test Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 3 +++ go/test/endtoend/vreplication/fk_test.go | 2 +- go/test/endtoend/vreplication/sidecardb_test.go | 2 +- .../endtoend/vreplication/vdiff_multiple_movetables_test.go | 4 +--- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 32c639486cf..e984a60050e 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -116,6 +116,9 @@ type Keyspace struct { VSchema string Schema string SidecarDBName string + + numReplicas int + numRDOnly int } // Shard 
represents a Vitess shard in a keyspace diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 56747bd6ef0..a7aca913856 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -48,7 +48,7 @@ func TestFKWorkflow(t *testing.T) { } defer func() { extraVTTabletArgs = nil }() - cellName := "zone" + cellName := "zone1" vc = NewVitessCluster(t, nil) allCellNames = cellName diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index 81e79f98b54..cea74626659 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -67,7 +67,7 @@ func TestSidecarDB(t *testing.T) { cell1 := vc.Cells[defaultCellName] tablet100 := fmt.Sprintf("%s-100", defaultCellName) tablet101 := fmt.Sprintf("%s-101", defaultCellName) - vc.AddKeyspace(t, []*Cell{cell1}, keyspace, shard, initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard] tablet100Port := shard0.Tablets[tablet100].Vttablet.Port tablet101Port := shard0.Tablets[tablet101].Vttablet.Port diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 007f51c0f7d..b40efd7f7a1 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -35,16 +35,14 @@ import ( func TestMultipleConcurrentVDiffs(t *testing.T) { cellName := "zone1" vc = NewVitessCluster(t, nil) + defer vc.TearDown() - require.NotNil(t, vc) allCellNames = cellName defaultCellName := cellName defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "product" shardName := "0" - defer vc.TearDown() - 
cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) From aaec9c5ac6a6344af823805722fffafb03c0336c Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Mon, 11 Dec 2023 14:19:01 +0100 Subject: [PATCH 03/21] Set vttablet db time zone as UTC on initialize: without that, timestamp columns are failing on e2e tests on Mac Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index e984a60050e..a76322b8cac 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -663,6 +663,11 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa if err := tablet.Vttablet.Setup(); err != nil { t.Fatalf(err.Error()) } + query := "SET GLOBAL time_zone = '+00:00';" + qr, err := tablet.Vttablet.QueryTablet(query, tablet.Vttablet.Keyspace, false) + if err != nil { + t.Fatalf("failed to set time_zone: %v, output: %v", err, qr) + } } } require.NotEqual(t, 0, primaryTabletUID, "Should have created a primary tablet") From 3193d4ad762316e7a49b96d860c78cae3f97caf5 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 15:43:23 +0100 Subject: [PATCH 04/21] Convert mixed case table names to all lower case, so it works also on Mac. 
We are not really testing case sensitiveness here Signed-off-by: Rohit Nayak --- .../vreplication/vplayer_flaky_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index dc11ac7bd9c..04738ee7857 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -692,8 +692,8 @@ func TestPlayerFilters(t *testing.T) { fmt.Sprintf("create table %s.dst4(id1 int, val varbinary(128), primary key(id1))", vrepldb), "create table src5(id1 int, id2 int, val varbinary(128), primary key(id1))", fmt.Sprintf("create table %s.dst5(id1 int, val varbinary(128), primary key(id1))", vrepldb), - "create table srcCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", - fmt.Sprintf("create table %s.dstCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb), + "create table src_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", + fmt.Sprintf("create table %s.dst_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb), }) defer execStatements(t, []string{ "drop table src1", @@ -711,8 +711,8 @@ func TestPlayerFilters(t *testing.T) { fmt.Sprintf("drop table %s.dst4", vrepldb), "drop table src5", fmt.Sprintf("drop table %s.dst5", vrepldb), - "drop table srcCharset", - fmt.Sprintf("drop table %s.dstCharset", vrepldb), + "drop table src_charset", + fmt.Sprintf("drop table %s.dst_charset", vrepldb), }) env.SchemaEngine.Reload(context.Background()) @@ -737,8 +737,8 @@ func TestPlayerFilters(t *testing.T) { Match: "dst5", Filter: "select id1, 
val from src5 where val = 'abc'", }, { - Match: "dstCharset", - Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from srcCharset", + Match: "dst_charset", + Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from src_charset", }}, } bls := &binlogdatapb.BinlogSource{ @@ -986,14 +986,14 @@ func TestPlayerFilters(t *testing.T) { data: [][]string{{"1", "abc"}, {"4", "abc"}}, }, { // test collation + filter - input: "insert into srcCharset values (1,'木元')", + input: "insert into src_charset values (1,'木元')", output: qh.Expect( "begin", - "insert into dstCharset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))", + "insert into dst_charset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))", "/update _vt.vreplication set pos=", "commit", ), - table: "dstCharset", + table: "dst_charset", data: [][]string{{"1", "木abcxyz", "木abcxyz"}}, }} From 5858e5768f3e38e76ab9f97108a84e773f3da1b2 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 16:14:17 +0100 Subject: [PATCH 05/21] Was incorrectly calling teardown Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/resharding_workflows_v2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index bebbf280f54..4ecb1594b17 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -672,7 +672,6 @@ func setupCustomer2Keyspace(t *testing.T) { func setupMinimalCluster(t *testing.T) 
*VitessCluster { vc = NewVitessCluster(t, nil) - vc.TearDown() defaultCellName := "zone1" allCellNames = defaultCellName From 1b0b7b8549bec2c7b9d678dc6d5f47e2d2ae514f Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 17:22:55 +0100 Subject: [PATCH 06/21] Remove unused MultiCell test and fold in one subtest into CellAlias Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 19 +++++++ go/test/endtoend/vreplication/migrate_test.go | 3 + .../resharding_workflows_v2_test.go | 1 - .../vreplication/vdiff_helper_test.go | 2 +- .../vreplication/vreplication_test.go | 57 ++----------------- go/test/endtoend/vreplication/vstream_test.go | 3 + 6 files changed, 32 insertions(+), 53 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index a76322b8cac..e4a1d1ffaad 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/vttablet" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/mysqlctl" @@ -87,6 +89,23 @@ type ClusterConfig struct { vreplicationCompressGTID bool } +func (cc *ClusterConfig) compressGTID() func() { + cc.vreplicationCompressGTID = true + return func() { + cc.vreplicationCompressGTID = false + } +} + +func setVTTabletExperimentalFlags() func() { + experimentalArgs := fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching) + oldArgs := extraVTTabletArgs + extraVTTabletArgs = append(extraVTTabletArgs, experimentalArgs) + return func() { + extraVTTabletArgs = oldArgs + } +} + // VitessCluster represents all components within the test cluster type VitessCluster struct { t *testing.T diff --git a/go/test/endtoend/vreplication/migrate_test.go 
b/go/test/endtoend/vreplication/migrate_test.go index 3b3bc8ebd9e..15f47455dcb 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -68,6 +68,9 @@ func TestVtctlMigrate(t *testing.T) { defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) + t.Run("VStreamFrom", func(t *testing.T) { + testVStreamFrom(t, "product", 2) + }) // create external cluster extCell := "extcell1" diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 4ecb1594b17..26227f5f24b 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -609,7 +609,6 @@ func testRestOfWorkflow(t *testing.T) { func setupCluster(t *testing.T) *VitessCluster { vc = NewVitessCluster(t, &clusterOptions{cells: []string{"zone1", "zone2"}}) - defer vc.TearDown() defaultCellName := "zone1" allCellNames = defaultCellName diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 7dbc675886b..b60b42e546f 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -34,7 +34,7 @@ import ( const ( vdiffTimeout = 90 * time.Second // we can leverage auto retry on error with this longer-than-usual timeout vdiffRetryTimeout = 30 * time.Second - vdiffStatusCheckInterval = 1 * time.Second + vdiffStatusCheckInterval = 5 * time.Second vdiffRetryInterval = 5 * time.Second ) diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index e3984ccb646..0ab79b04ec7 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -39,7 +39,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/vtgate/vtgateconn" - "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" @@ -280,11 +279,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { } func TestBasicVreplicationWorkflow(t *testing.T) { - ogflags := extraVTTabletArgs - defer func() { extraVTTabletArgs = ogflags }() - // Test VPlayer batching mode. - extraVTTabletArgs = append(extraVTTabletArgs, fmt.Sprintf("--vreplication_experimental_flags=%d", - vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching)) + defer setVTTabletExperimentalFlags() sourceKsOpts["DBTypeVersion"] = "mysql-8.0" targetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") @@ -427,37 +422,6 @@ func TestMoveTablesMariaDBToMySQL(t *testing.T) { testVreplicationWorkflows(t, true /* only do MoveTables */, "") } -func TestMultiCellVreplicationWorkflow(t *testing.T) { - cells := []string{"zone1", "zone2"} - allCellNames = strings.Join(cells, ",") - - vc = NewVitessCluster(t, &clusterOptions{cells: cells}) - defer vc.TearDown() - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - keyspace := "product" - shard := "0" - - cell1 := vc.Cells["zone1"] - cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) - 
insertInitialData(t) - shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name, true) - isTableInDenyList(t, vc, "product:0", "customer") - // we tag along this test so as not to create the overhead of creating another cluster - testVStreamCellFlag(t) -} - func TestVStreamFlushBinlog(t *testing.T) { defaultCellName := "zone1" allCellNames = defaultCellName @@ -622,23 +586,11 @@ func testVStreamCellFlag(t *testing.T) { // We also reuse the setup of this test to validate that the "vstream * from" vtgate query functionality is functional func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - mainClusterConfig.vreplicationCompressGTID = true - oldVTTabletExtraArgs := extraVTTabletArgs - extraVTTabletArgs = append(extraVTTabletArgs, - // Test VPlayer batching mode. - fmt.Sprintf("--vreplication_experimental_flags=%d", - vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), - ) - defer func() { - mainClusterConfig.vreplicationCompressGTID = false - extraVTTabletArgs = oldVTTabletExtraArgs - }() + defer mainClusterConfig.compressGTID() + defer setVTTabletExperimentalFlags() vc = NewVitessCluster(t, &clusterOptions{cells: cells}) defer vc.TearDown() - allCellNames = "zone1,zone2" - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] keyspace := "product" shard := "0" @@ -669,6 +621,9 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { testVStreamFrom(t, keyspace, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) + isTableInDenyList(t, vc, "product:0", "customer") + // we tag along this test so as not to create the overhead of creating another cluster + testVStreamCellFlag(t) } // testVStreamFrom confirms that the "vstream * from" endpoint is serving data diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 
745de5987d5..58d89930e63 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -62,6 +62,9 @@ func testVStreamWithFailover(t *testing.T, failover bool) { verifyClusterHealth(t, vc) insertInitialData(t) + t.Run("VStreamFrom", func(t *testing.T) { + testVStreamFrom(t, "product", 2) + }) ctx := context.Background() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { From 0a12450f48e0b23f901f1e20bb2c75d7cdd92d30 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 18:18:05 +0100 Subject: [PATCH 07/21] Remove global vtgateConn Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 7 ++++ go/test/endtoend/vreplication/fk_ext_test.go | 4 +- go/test/endtoend/vreplication/fk_test.go | 6 ++- .../vreplication/initial_data_test.go | 14 +++++++ .../endtoend/vreplication/materialize_test.go | 4 +- go/test/endtoend/vreplication/migrate_test.go | 4 +- .../vreplication/movetables_buffering_test.go | 1 - .../partial_movetables_seq_test.go | 15 +++++-- .../vreplication/partial_movetables_test.go | 3 +- .../endtoend/vreplication/performance_test.go | 2 +- .../resharding_workflows_v2_test.go | 18 ++++++-- .../endtoend/vreplication/time_zone_test.go | 2 +- go/test/endtoend/vreplication/vdiff2_test.go | 6 ++- .../vreplication/vdiff_helper_test.go | 2 + .../vdiff_multiple_movetables_test.go | 2 +- .../vreplication/vreplication_test.go | 41 ++++++++++++++----- .../vreplication/vschema_load_test.go | 2 +- go/test/endtoend/vreplication/vstream_test.go | 8 ++-- .../main/java/io/vitess/client/SQLFuture.java | 6 +-- 19 files changed, 110 insertions(+), 37 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index e4a1d1ffaad..b9921c35d83 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go 
@@ -881,6 +881,13 @@ func (vc *VitessCluster) GetVTGateConn(t *testing.T) *mysql.Conn { return getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) } +func getVTGateConn() (*mysql.Conn, func()) { + vtgateConn := vc.GetVTGateConn(vc.t) + return vtgateConn, func() { + vtgateConn.Close() + } +} + func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing.T), func(t *testing.T)) { conn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) _, err := conn.ExecuteFetch("begin", 1000, false) diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index 44f7a1d7f14..9f1bca9f5fd 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -117,7 +117,7 @@ func TestFKExt(t *testing.T) { require.NoError(t, err) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, "0"), 1, shardStatusWaitTimeout)) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -297,7 +297,7 @@ func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards } func areRowCountsEqual(t *testing.T) bool { - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() parentRowCount := getRowCount(t, vtgateConn, "target2.parent") childRowCount := getRowCount(t, vtgateConn, "target2.child") diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index a7aca913856..16ee5796b13 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -68,7 +68,7 @@ func 
TestFKWorkflow(t *testing.T) { require.NoError(t, err) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -140,6 +140,8 @@ func TestFKWorkflow(t *testing.T) { func insertInitialFKData(t *testing.T) { t.Run("insertInitialFKData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sourceKeyspace := "fksource" shard := "0" db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) @@ -275,6 +277,8 @@ func (ls *fkLoadSimulator) delete() { func (ls *fkLoadSimulator) exec(query string) *sqltypes.Result { t := ls.t + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "fksource", query) require.NotNil(t, qr) return qr diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index bf93a040942..23f699563e2 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -27,6 +27,8 @@ import ( func insertInitialData(t *testing.T) { t.Run("insertInitialData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") execMultipleQueries(t, vtgateConn, "product:0", string(lines)) @@ -48,6 +50,8 @@ const NumJSONRows = 100 func insertJSONValues(t *testing.T) { // insert null value combinations + vtgateConn, closeConn := getVTGateConn() + defer closeConn() execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") execVtgateQuery(t, vtgateConn, 
"product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") @@ -76,6 +80,8 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { // the number of customer records we are going to // create. The value we get back is the max value // that we reserved. + vtgateConn, closeConn := getVTGateConn() + defer closeConn() maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. @@ -95,16 +101,22 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } func insertMoreProducts(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } @@ -122,6 +134,8 @@ var blobTableQueries = []string{ } func insertIntoBlobTable(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, query := range blobTableQueries { execVtgateQuery(t, vtgateConn, "product:0", query) } diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index 0d437308fb2..b2009b87806 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -86,7 +86,7 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { err = 
cluster.WaitForHealthyShard(vc.VtctldClient, ks2, shard) require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false) @@ -204,7 +204,7 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 15f47455dcb..c2354e85267 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -64,7 +64,7 @@ func TestVtctlMigrate(t *testing.T) { vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -191,7 +191,7 @@ func TestVtctldMigrate(t *testing.T) { vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index b16cd7c4cc3..a9199423012 
100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -14,7 +14,6 @@ import ( func TestMoveTablesBuffering(t *testing.T) { defaultRdonly = 1 vc = setupMinimalCluster(t) - defer vtgateConn.Close() defer vc.TearDown() currentWorkflowType = wrangler.MoveTablesWorkflow diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index 348d2865654..d2dfaf4d857 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -173,13 +173,11 @@ func (tc *vrepTestCase) teardown() { } func (tc *vrepTestCase) setupCluster() { - tc.vc = NewVitessCluster(tc.t, nil) vc = tc.vc // for backward compatibility since vc is used globally in this package require.NotNil(tc.t, tc.vc) tc.setupKeyspaces([]string{"commerce", "seqSrc"}) tc.vtgateConn = getConnection(tc.t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) - vtgateConn = tc.vtgateConn // for backward compatibility since vtgateConn is used globally in this package } func (tc *vrepTestCase) initData() { @@ -335,6 +333,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { shard := "80-" var wf80Dash, wfDash80 *workflow currentCustomerCount = getCustomerCount(t, "before customer2.80-") + vtgateConn, closeConn := getVTGateConn() t.Run("Start MoveTables on customer2.80-", func(t *testing.T) { // Now setup the customer2 keyspace so we can do a partial move tables for one of the two shards: 80-. defaultRdonly = 0 @@ -360,8 +359,10 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // Reset any existing vtgate connection state. 
- vtgateConn.Close() - vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + closeConn() + + vtgateConn, closeConn = getVTGateConn() + defer closeConn() t.Run("Confirm routing rules", func(t *testing.T) { // Global routing rules should be in place with everything going to the source keyspace (customer). @@ -536,6 +537,8 @@ var newCustomerCount = int64(201) var lastCustomerId int64 func getCustomerCount(t *testing.T, msg string) int64 { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select count(*) from customer") require.NotNil(t, qr) count, err := qr.Rows[0][0].ToInt64() @@ -544,6 +547,8 @@ func getCustomerCount(t *testing.T, msg string) int64 { } func confirmLastCustomerIdHasIncreased(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select cid from customer order by cid desc limit 1") require.NotNil(t, qr) currentCustomerId, err := qr.Rows[0][0].ToInt64() @@ -553,6 +558,8 @@ func confirmLastCustomerIdHasIncreased(t *testing.T) { } func insertCustomers(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for i := int64(1); i < newCustomerCount+1; i++ { execVtgateQuery(t, vtgateConn, "customer@primary", fmt.Sprintf("insert into customer(name) values ('name-%d')", currentCustomerCount+i)) } diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index af36b2584dc..3f68841cb42 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -103,7 +103,6 @@ func TestPartialMoveTablesBasic(t *testing.T) { extraVTGateArgs = origExtraVTGateArgs }() vc = setupMinimalCluster(t) - defer vtgateConn.Close() defer vc.TearDown() setupMinimalCustomerKeyspace(t) @@ -159,6 +158,8 @@ func TestPartialMoveTablesBasic(t *testing.T) { catchup(t, 
targetTab1, wfName, "Partial MoveTables Customer to Customer2") vdiffSideBySide(t, ksWf, "") + vtgateConn, closeConn := getVTGateConn() + defer closeConn() waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index a8f3470049d..9b9bee7d521 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -63,7 +63,7 @@ create table customer(cid int, name varbinary(128), meta json default null, typ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 26227f5f24b..fdf29ad6b29 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -227,6 +227,8 @@ func validateReadsRoute(t *testing.T, tabletTypes string, tablet *cluster.Vttabl if tabletTypes == "" { tabletTypes = "replica,rdonly" } + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { @@ -244,6 +246,8 @@ func validateReadsRouteToTarget(t *testing.T, tabletTypes string) { } func validateWritesRouteToSource(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() 
insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery) @@ -251,6 +255,8 @@ func validateWritesRouteToSource(t *testing.T) { } func validateWritesRouteToTarget(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) @@ -296,7 +302,6 @@ func TestBasicV2Workflows(t *testing.T) { }() vc = setupCluster(t) - defer vtgateConn.Close() defer vc.TearDown() // Internal tables like the lifecycle ones for OnlineDDL should be ignored @@ -342,6 +347,8 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) + vtgateConn, closeConn := getVTGateConn() + defer closeConn() // sanity check output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "product") require.NoError(t, err) @@ -406,6 +413,8 @@ func testReplicatingWithPKEnumCols(t *testing.T) { // when we re-insert the same row values and ultimately VDiff shows the table as // being identical in both keyspaces. 
+ vtgateConn, closeConn := getVTGateConn() + defer closeConn() // typ is an enum, with soho having a stored and binlogged value of 2 deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" @@ -418,6 +427,8 @@ func testReplicatingWithPKEnumCols(t *testing.T) { } func testReshardV2Workflow(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() currentWorkflowType = wrangler.ReshardWorkflow // create internal tables on the original customer shards that should be @@ -445,6 +456,8 @@ func testReshardV2Workflow(t *testing.T) { } func testMoveTablesV2Workflow(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() currentWorkflowType = wrangler.MoveTablesWorkflow // test basic forward and reverse flows @@ -626,7 +639,7 @@ func setupCluster(t *testing.T) *VitessCluster { require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second)) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) @@ -686,7 +699,6 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { require.NoError(t, err) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) verifyClusterHealth(t, vc) insertInitialData(t) diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2da2ad0e3a2..e72b7a885cf 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ 
-54,7 +54,7 @@ func TestMoveTablesTZ(t *testing.T) { err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 69afd1a99ba..54744afb6bb 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -139,7 +139,7 @@ func TestVDiff2(t *testing.T) { require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)) } - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -378,6 +378,8 @@ func testNoOrphanedData(t *testing.T, keyspace, workflow string, shards []string func testResume(t *testing.T, tc *testCase, cells string) { t.Run("Resume", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state @@ -421,6 +423,8 @@ func testStop(t *testing.T, ksWorkflow, cells string) { func testAutoRetryError(t *testing.T, tc *testCase, cells string) { t.Run("Auto retry on error", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index b60b42e546f..88e462d2eaa 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ 
b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -341,6 +341,8 @@ func encodeString(in string) string { // generateMoreCustomers creates additional test data for better tests // when needed. func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() log.Infof("Generating more test data with an additional %d customers", numCustomers) res := execVtgateQuery(t, vtgateConn, keyspace, "select max(cid) from customer") startingID, _ := res.Rows[0][0].ToInt64() diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index b40efd7f7a1..9bbeb93e718 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -52,7 +52,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { require.NoError(t, err) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 0ab79b04ec7..c567dc83124 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -50,10 +50,10 @@ import ( ) var ( - vc *VitessCluster - vtgate *cluster.VtgateProcess - defaultCell *Cell - vtgateConn *mysql.Conn + vc *VitessCluster + vtgate *cluster.VtgateProcess + defaultCell *Cell + //vtgateConn *mysql.Conn defaultRdonly int defaultReplicas int allCellNames string @@ -145,7 +145,7 @@ func TestVReplicationDDLHandling(t *testing.T) { require.NoError(t, err) verifyClusterHealth(t, vc) - 
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) targetTab := vc.getPrimaryTablet(t, targetKs, shard) @@ -323,7 +323,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -450,7 +450,7 @@ func TestVStreamFlushBinlog(t *testing.T) { require.NoError(t, err) verifyClusterHealth(t, vc) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) @@ -467,7 +467,7 @@ func TestVStreamFlushBinlog(t *testing.T) { // Generate a lot of binlog event bytes targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 1024 - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', '%s', now())" for i := 100; i < 10000; i++ { randStr, err := randHex(6500) @@ -611,7 +611,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { require.NoError(t, err) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, 
vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -723,7 +723,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // The wait in the next code block which checks that customer.dec80 is updated, also confirms that the // blob-related dmls we execute here are vreplicated. insertIntoBlobTable(t) - + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() // Confirm that the 0 scale decimal field, dec80, is replicated correctly dec80Replicated := false execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0") @@ -910,6 +911,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl func validateRollupReplicates(t *testing.T) { t.Run("validateRollupReplicates", func(t *testing.T) { insertMoreProducts(t) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() waitForRowCount(t, vtgateConn, "product", "rollup", 1) waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(5)]]`) @@ -918,6 +921,8 @@ func validateRollupReplicates(t *testing.T) { func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias string) { t.Run("reshardCustomer2to4Split", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := "customer" counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", @@ -931,6 +936,8 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str func reshardMerchant2to3SplitMerge(t *testing.T) { t.Run("reshardMerchant2to3SplitMerge", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, 
vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := merchantKeyspace counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0} reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-", @@ -978,6 +985,8 @@ func reshardMerchant2to3SplitMerge(t *testing.T) { func reshardMerchant3to1Merge(t *testing.T) { t.Run("reshardMerchant3to1Merge", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := merchantKeyspace counts := map[string]int{"zone1-2000": 3} reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0", @@ -1068,6 +1077,8 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou func shardOrders(t *testing.T) { t.Run("shardOrders", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "o2c" cell := defaultCell.Name sourceKs := "product" @@ -1110,6 +1121,8 @@ func checkThatVDiffFails(t *testing.T, keyspace, workflow string) { func shardMerchant(t *testing.T) { t.Run("shardMerchant", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "p2m" cell := defaultCell.Name sourceKs := "product" @@ -1263,6 +1276,8 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { func materializeRollup(t *testing.T, useVtctldClient bool) { t.Run("materializeRollup", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() keyspace := "product" workflow := "rollup" applyVSchema(t, materializeSalesVSchema, keyspace) @@ -1277,6 +1292,8 @@ func materializeRollup(t *testing.T, useVtctldClient bool) { func materializeSales(t *testing.T, useVtctldClient bool) { t.Run("materializeSales", func(t *testing.T) { + vtgateConn := 
getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() keyspace := "product" applyVSchema(t, materializeSalesVSchema, keyspace) materialize(t, materializeSalesSpec, useVtctldClient) @@ -1290,6 +1307,8 @@ func materializeSales(t *testing.T, useVtctldClient bool) { func materializeMerchantSales(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantSales", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "msales" materialize(t, materializeMerchantSalesSpec, useVtctldClient) merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") @@ -1304,6 +1323,8 @@ func materializeMerchantSales(t *testing.T, useVtctldClient bool) { func materializeMerchantOrders(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantOrders", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "morders" keyspace := merchantKeyspace applyVSchema(t, merchantOrdersVSchema, keyspace) diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index 8f66df82090..05a5d8e94b2 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -52,7 +52,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") require.NoError(t, err) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() // ch is used to signal that there is significant data inserted into the tables and when a 
lot of vschema changes have been applied diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 58d89930e63..28ecb77a854 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -57,7 +57,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -219,6 +219,8 @@ const vschemaSharded = ` ` func insertRow(keyspace, table string, id int) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false) vtgateConn.ExecuteFetch("begin", 1000, false) _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false) @@ -254,7 +256,7 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID err := cluster.WaitForHealthyShard(vc.VtctldClient, "unsharded", "0") require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -403,7 +405,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven require.NotNil(t, vtgate) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, 
vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/java/client/src/main/java/io/vitess/client/SQLFuture.java b/java/client/src/main/java/io/vitess/client/SQLFuture.java index 0fcfb4521bc..5804b7079ad 100644 --- a/java/client/src/main/java/io/vitess/client/SQLFuture.java +++ b/java/client/src/main/java/io/vitess/client/SQLFuture.java @@ -38,7 +38,7 @@ *

* For users who want to get results synchronously, we provide {@link #checkedGet()} as a * convenience method. Unlike {@link #get()}, it throws only {@code SQLException}, so e.g. {@code - * vtgateConn.execute(...).checkedGet()} behaves the same as our old synchronous API. + * vtgateConn2.execute(...).checkedGet()} behaves the same as our old synchronous API. * *

* The additional methods are similar to the {@code CheckedFuture} interface (marked as beta), but @@ -60,7 +60,7 @@ public SQLFuture(ListenableFuture delegate) { * *

* This can be used to effectively turn the Vitess client into a synchronous API. For example: - * {@code Cursor cursor = vtgateConn.execute(...).checkedGet();} + * {@code Cursor cursor = vtgateConn2.execute(...).checkedGet();} */ public V checkedGet() throws SQLException { try { @@ -78,7 +78,7 @@ public V checkedGet() throws SQLException { * *

* This can be used to effectively turn the Vitess client into a synchronous API. For example: - * {@code Cursor cursor = vtgateConn.execute(...).checkedGet();} + * {@code Cursor cursor = vtgateConn2.execute(...).checkedGet();} */ public V checkedGet(long timeout, TimeUnit unit) throws TimeoutException, SQLException { try { From 098e821e53dce7391ca98dddd5c1ed925a7e0eb0 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 19:34:46 +0100 Subject: [PATCH 08/21] Fix issues with vstream test related to refactoring of global vtgate variable Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/helper_test.go | 13 +++++ .../endtoend/vreplication/time_zone_test.go | 2 - go/test/endtoend/vreplication/vstream_test.go | 52 ++++++++++--------- 3 files changed, 40 insertions(+), 27 deletions(-) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index fc7d66bc732..b4e3dd2e6eb 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -103,6 +103,19 @@ func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { require.NoError(t, err) return qr } +func getConnectionNoError(t *testing.T, hostname string, port int) *mysql.Conn { + vtParams := mysql.ConnParams{ + Host: hostname, + Port: port, + Uname: "vt_dba", + } + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + if err != nil { + return nil + } + return conn +} func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index e72b7a885cf..064c334cea4 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -49,8 +49,6 @@ func TestMoveTablesTZ(t *testing.T) { cell1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", 
initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) require.NoError(t, err) diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 28ecb77a854..c276cc329ab 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -219,8 +219,10 @@ const vschemaSharded = ` ` func insertRow(keyspace, table string, id int) { - vtgateConn, closeConn := getVTGateConn() - defer closeConn() + vtgateConn := getConnectionNoError(vc.t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + if vtgateConn == nil { + return + } vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false) vtgateConn.ExecuteFetch("begin", 1000, false) _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false) @@ -232,8 +234,8 @@ func insertRow(keyspace, table string, id int) { type numEvents struct { numRowEvents, numJournalEvents int64 - numLessThan80Events, numGreaterThan80Events int64 - numLessThan40Events, numGreaterThan40Events int64 + numDash80Events, num80DashEvents int64 + numDash40Events, num40DashEvents int64 numShard0BeforeReshardEvents, numShard0AfterReshardEvents int64 } @@ -328,13 +330,13 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID shard := ev.RowEvent.Shard switch shard { case "-80": - ne.numLessThan80Events++ + ne.numDash80Events++ case "80-": - ne.numGreaterThan80Events++ + ne.num80DashEvents++ case "-40": - ne.numLessThan40Events++ + ne.numDash40Events++ case "40-": - ne.numGreaterThan40Events++ + ne.num40DashEvents++ } ne.numRowEvents++ case binlogdatapb.VEventType_JOURNAL: @@ -470,13 +472,13 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven ne.numShard0BeforeReshardEvents++ } case "-80": - 
ne.numLessThan80Events++ + ne.numDash80Events++ case "80-": - ne.numGreaterThan80Events++ + ne.num80DashEvents++ case "-40": - ne.numLessThan40Events++ + ne.numDash40Events++ case "40-": - ne.numGreaterThan40Events++ + ne.num40DashEvents++ } ne.numRowEvents++ case binlogdatapb.VEventType_JOURNAL: @@ -524,7 +526,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer") insertedCustomerRows, err := customerResult.Rows[0][0].ToCastInt64() require.NoError(t, err) - require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events) + require.Equal(t, insertedCustomerRows, ne.numDash80Events+ne.num80DashEvents+ne.numDash40Events+ne.num40DashEvents) return ne } @@ -536,20 +538,20 @@ func TestVStreamStopOnReshardTrue(t *testing.T) { ne := testVStreamStopOnReshardFlag(t, true, 1000) require.Greater(t, ne.numJournalEvents, int64(0)) require.NotZero(t, ne.numRowEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.Zero(t, ne.numLessThan40Events) - require.Zero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.Zero(t, ne.numDash40Events) + require.Zero(t, ne.num40DashEvents) } func TestVStreamStopOnReshardFalse(t *testing.T) { ne := testVStreamStopOnReshardFlag(t, false, 2000) require.Equal(t, int64(0), ne.numJournalEvents) require.NotZero(t, ne.numRowEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.NotZero(t, ne.numLessThan40Events) - require.NotZero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.NotZero(t, ne.numDash40Events) + require.NotZero(t, ne.num40DashEvents) } func TestVStreamWithKeyspacesToWatch(t *testing.T) { @@ 
-566,8 +568,8 @@ func TestVStreamCopyMultiKeyspaceReshard(t *testing.T) { require.NotZero(t, ne.numRowEvents) require.NotZero(t, ne.numShard0BeforeReshardEvents) require.NotZero(t, ne.numShard0AfterReshardEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.NotZero(t, ne.numLessThan40Events) - require.NotZero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.NotZero(t, ne.numDash40Events) + require.NotZero(t, ne.num40DashEvents) } From a940980f5658c0b09bddfcca437b7f954b3e403b Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 20:14:05 +0100 Subject: [PATCH 09/21] Remove some global cell attributes Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/fk_ext_test.go | 4 +- go/test/endtoend/vreplication/fk_test.go | 3 -- go/test/endtoend/vreplication/helper_test.go | 14 +++++ .../endtoend/vreplication/materialize_test.go | 9 +--- go/test/endtoend/vreplication/migrate_test.go | 8 +-- .../endtoend/vreplication/performance_test.go | 4 +- .../resharding_workflows_v2_test.go | 17 +++--- .../endtoend/vreplication/time_zone_test.go | 5 +- go/test/endtoend/vreplication/vdiff2_test.go | 7 +-- .../vdiff_multiple_movetables_test.go | 3 -- .../vreplication/vreplication_test.go | 52 ++++++++++--------- .../vreplication/vschema_load_test.go | 4 +- go/test/endtoend/vreplication/vstream_test.go | 13 ++--- 13 files changed, 64 insertions(+), 79 deletions(-) diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index 9f1bca9f5fd..20cd8f170ed 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -101,9 +101,7 @@ func TestFKExt(t *testing.T) { cells: cells, clusterConfig: fkextConfig.ClusterConfig, }) - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] + defaultCell := 
vc.Cells[vc.CellNames[0]] cell := vc.Cells[cellName] defer vc.TearDown() diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 16ee5796b13..ba60fec0e48 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -51,9 +51,6 @@ func TestFKWorkflow(t *testing.T) { cellName := "zone1" vc = NewVitessCluster(t, nil) - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "fksource" shardName := "0" diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index b4e3dd2e6eb..f221175dc9c 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -920,3 +920,17 @@ func waitForCondition(name string, condition func() bool, timeout time.Duration) } } } + +func getCellNames(cells []*Cell) string { + var cellNames []string + if cells == nil { + cells = []*Cell{} + for _, cell := range vc.Cells { + cells = append(cells, cell) + } + } + for _, cell := range cells { + cellNames = append(cellNames, cell.Name) + } + return strings.Join(cellNames, ",") +} diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index b2009b87806..d55a71d33aa 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -63,8 +63,6 @@ const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters func testShardedMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) ks1 := "ks1" ks2 := "ks2" @@ -74,8 +72,7 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { defer func() { defaultReplicas = 1 }() defer 
vc.TearDown() - - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) @@ -181,8 +178,6 @@ RETURN id * length(val); ` func testMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) sourceKs := "source" targetKs := "target" @@ -193,7 +188,7 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index c2354e85267..37917a8485f 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -48,15 +48,13 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { // hence the VTDATAROOT env variable gets overwritten. 
// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctlMigrate(t *testing.T) { - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) defaultReplicas = 0 defaultRdonly = 0 defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") @@ -174,15 +172,13 @@ func TestVtctlMigrate(t *testing.T) { // hence the VTDATAROOT env variable gets overwritten. // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctldMigrate(t *testing.T) { - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) defaultReplicas = 0 defaultRdonly = 0 defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index 9b9bee7d521..c03edc4c0b5 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -50,12 +50,10 @@ create table customer(cid int, name varbinary(128), meta json default null, typ const sourceKs = "stress_src" const targetKs = "stress_tgt" - allCellNames = defaultCellName - vc = NewVitessCluster(t, nil) defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, 
sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index fdf29ad6b29..450e53b31b4 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -268,7 +268,7 @@ func validateWritesRouteToTarget(t *testing.T) { func revert(t *testing.T, workflowType string) { switchWrites(t, workflowType, ksWorkflow, true) validateWritesRouteToSource(t) - switchReadsNew(t, workflowType, allCellNames, ksWorkflow, true) + switchReadsNew(t, workflowType, getCellNames(nil), ksWorkflow, true) validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup @@ -623,10 +623,6 @@ func testRestOfWorkflow(t *testing.T) { func setupCluster(t *testing.T) *VitessCluster { vc = NewVitessCluster(t, &clusterOptions{cells: []string{"zone1", "zone2"}}) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] - zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] @@ -642,7 +638,7 @@ func setupCluster(t *testing.T) *VitessCluster { defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) - + defaultCell := vc.Cells[vc.CellNames[0]] sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-102"].Vttablet @@ -661,6 +657,7 @@ func setupCustomerKeyspace(t *testing.T) { require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second)) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 
30*time.Second)) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -685,9 +682,7 @@ func setupCustomer2Keyspace(t *testing.T) { func setupMinimalCluster(t *testing.T) *VitessCluster { vc = NewVitessCluster(t, nil) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] zone1 := vc.Cells["zone1"] @@ -716,6 +711,7 @@ func setupMinimalCustomerKeyspace(t *testing.T) { require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -755,7 +751,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias catchup(t, targetTab2, workflow, workflowType) vdiffSideBySide(t, ksWorkflow, "") } - + allCellNames := getCellNames(cells) var switchReadsFollowedBySwitchWrites = func() { moveTablesAndWait() @@ -835,6 +831,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias func createAdditionalCustomerShards(t *testing.T, shards string) { ksName := "customer" + defaultCell := vc.Cells[vc.CellNames[0]] keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, 
shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) arrTargetShardNames := strings.Split(shards, ",") diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 064c334cea4..144565fe3d2 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -32,8 +32,6 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { - allCellNames = "zone1" - defaultCellName := "zone1" workflow := "tz" sourceKs := "product" targetKs := "customer" @@ -43,7 +41,7 @@ func TestMoveTablesTZ(t *testing.T) { vc = NewVitessCluster(t, nil) defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] cells := []*Cell{defaultCell} cell1 := vc.Cells["zone1"] @@ -89,7 +87,6 @@ func TestMoveTablesTZ(t *testing.T) { err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) require.NoError(t, err) - defaultCell := vc.Cells["zone1"] custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 54744afb6bb..cd64e625611 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -112,7 +112,7 @@ var testCases = []*testCase{ } func TestVDiff2(t *testing.T) { - allCellNames = "zone5,zone1,zone2,zone3,zone4" + cellNames := "zone5,zone1,zone2,zone3,zone4" sourceKs := "product" sourceShards := []string{"0"} targetKs := "customer" @@ -120,13 +120,13 @@ func TestVDiff2(t *testing.T) { // This forces us to use multiple vstream packets even with small test tables. 
extraVTTabletArgs = []string{"--vstream_packet_size=1"} - vc = NewVitessCluster(t, &clusterOptions{cells: strings.Split(allCellNames, ",")}) + vc = NewVitessCluster(t, &clusterOptions{cells: strings.Split(cellNames, ",")}) defer vc.TearDown() zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] zone3 := vc.Cells["zone3"] - defaultCell = zone1 + defaultCell := zone1 // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. @@ -186,6 +186,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, if tc.typ == "Reshard" { args = append(args, "--source_shards", tc.sourceShards, "--target_shards", tc.targetShards) } + allCellNames := getCellNames(nil) args = append(args, "--cells", allCellNames) args = append(args, "--tables", tc.tables) args = append(args, "Create") diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 9bbeb93e718..2ceb11677f5 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -37,9 +37,6 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { vc = NewVitessCluster(t, nil) defer vc.TearDown() - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "product" shardName := "0" diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index c567dc83124..7cb892cefb7 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -50,13 +50,10 @@ import ( ) var ( - vc *VitessCluster - vtgate *cluster.VtgateProcess - defaultCell *Cell - //vtgateConn *mysql.Conn + vc *VitessCluster + vtgate *cluster.VtgateProcess defaultRdonly int defaultReplicas int - allCellNames string sourceKsOpts = make(map[string]string) targetKsOpts = 
make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) @@ -129,7 +126,7 @@ func TestVReplicationDDLHandling(t *testing.T) { shard := "0" vc = NewVitessCluster(t, nil) defer vc.TearDown() - defaultCell = vc.Cells[cell] + defaultCell := vc.Cells[cell] if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) @@ -235,7 +232,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { shard := "0" vc = NewVitessCluster(t, nil) defer vc.TearDown() - defaultCell = vc.Cells[cell] + defaultCell := vc.Cells[cell] // To test vstreamer source throttling for the MoveTables operation maxSourceTrxHistory := int64(5) extraVTTabletArgs = []string{ @@ -301,10 +298,7 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) - - require.NotNil(t, vc) // Keep the cluster processes minimal to deal with CI resource constraints defaultReplicas = 0 defaultRdonly = 0 @@ -316,7 +310,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) @@ -424,13 +418,12 @@ func TestMoveTablesMariaDBToMySQL(t *testing.T) { func TestVStreamFlushBinlog(t *testing.T) { defaultCellName := "zone1" - allCellNames = defaultCellName workflow := "test_vstream_p2c" shard := "0" vc = NewVitessCluster(t, nil) require.NotNil(t, vc) defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := 
vc.Cells[defaultCellName] // Keep the cluster processes minimal (no rdonly and no replica tablets) // to deal with CI resource constraints. @@ -793,8 +786,9 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } } vdiffSideBySide(t, ksWorkflow, "") - switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + cellNames := getCellNames(cells) + switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) + switchReads(t, workflowType, cellNames, ksWorkflow, false) assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) var commit func(t *testing.T) @@ -838,7 +832,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { // Reverse Replicate - switchReads(t, workflowType, allCellNames, ksWorkflow, true) + switchReads(t, workflowType, cellNames, ksWorkflow, true) printShardPositions(vc, ksShards) switchWrites(t, workflowType, ksWorkflow, true) @@ -858,7 +852,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl waitForNoWorkflowLag(t, vc, targetKs, workflow) // Go forward again - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, cellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) var exists bool @@ -866,7 +860,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) - moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) exists, err = isTableInDenyList(t, vc, "product:0", "customer") 
require.NoError(t, err, "Error getting denylist for customer:0") @@ -1020,12 +1014,14 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou tabletIDBase int, counts map[string]int, dryRunResultSwitchReads, dryRunResultSwitchWrites []string, cells []*Cell, sourceCellOrAlias string, autoIncrementStep int) { t.Run("reshard", func(t *testing.T) { + defaultCell := vc.Cells[vc.CellNames[0]] if cells == nil { cells = []*Cell{defaultCell} } if sourceCellOrAlias == "" { sourceCellOrAlias = defaultCell.Name } + callNames := getCellNames(cells) ksWorkflow := ksName + "." + workflow keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts)) @@ -1058,13 +1054,13 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou restartWorkflow(t, ksWorkflow) vdiffSideBySide(t, ksWorkflow, "") if dryRunResultSwitchReads != nil { - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica", "--dry-run") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "rdonly,replica", "--dry-run") } - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "rdonly,replica") if dryRunResultSwitchWrites != nil { - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary", "--dry-run") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "primary", "--dry-run") } - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "primary") reshardAction(t, "Complete", workflow, ksName, "", "", "", "") for tabletName, count := range counts { if tablets[tabletName] == nil { @@ -1079,6 +1075,7 @@ func shardOrders(t *testing.T) { 
t.Run("shardOrders", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() + defaultCell := vc.Cells[vc.CellNames[0]] workflow := "o2c" cell := defaultCell.Name sourceKs := "product" @@ -1095,7 +1092,7 @@ func shardOrders(t *testing.T) { catchup(t, customerTab1, workflow, workflowType) catchup(t, customerTab2, workflow, workflowType) vdiffSideBySide(t, ksWorkflow, "") - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1) @@ -1124,6 +1121,7 @@ func shardMerchant(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() workflow := "p2m" + defaultCell := vc.Cells[vc.CellNames[0]] cell := defaultCell.Name sourceKs := "product" targetKs := merchantKeyspace @@ -1145,7 +1143,7 @@ func shardMerchant(t *testing.T) { catchup(t, merchantTab2, workflow, workflowType) vdiffSideBySide(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) printRoutingRules(t, vc, "After merchant movetables") @@ -1198,6 +1196,7 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { // materializing from "product" keyspace to "customer" keyspace workflow := "cproduct" keyspace := "customer" + defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) materialize(t, materializeProductSpec, useVtctldClient) customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "primary") @@ -1281,6 +1280,7 @@ func 
materializeRollup(t *testing.T, useVtctldClient bool) { keyspace := "product" workflow := "rollup" applyVSchema(t, materializeSalesVSchema, keyspace) + defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet materialize(t, materializeRollupSpec, useVtctldClient) catchup(t, productTab, workflow, "Materialize") @@ -1297,6 +1297,7 @@ func materializeSales(t *testing.T, useVtctldClient bool) { keyspace := "product" applyVSchema(t, materializeSalesVSchema, keyspace) materialize(t, materializeSalesSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") waitForRowCount(t, vtgateConn, "product", "sales", 2) @@ -1311,6 +1312,7 @@ func materializeMerchantSales(t *testing.T, useVtctldClient bool) { defer vtgateConn.Close() workflow := "msales" materialize(t, materializeMerchantSalesSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1329,6 +1331,7 @@ func materializeMerchantOrders(t *testing.T, useVtctldClient bool) { keyspace := merchantKeyspace applyVSchema(t, merchantOrdersVSchema, keyspace) materialize(t, materializeMerchantOrdersSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1589,6 +1592,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) ksShards := []string{"product/0", "customer/-80", "customer/80-"} printShardPositions(vc, ksShards) 
+ defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index 05a5d8e94b2..5b0a2548dfb 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -40,12 +40,10 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { extendedTimeout := defaultTimeout * 4 - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index c276cc329ab..e7d19947cb2 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -43,8 +43,6 @@ import ( // - We ensure that this works through active reparents and doesn't miss any events // - We stream only from the primary and while streaming we reparent to a replica and then back to the original primary func testVStreamWithFailover(t *testing.T, failover bool) { - defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) require.NotNil(t, vc) @@ -52,7 +50,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defaultRdonly = 0 defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) vtgate = 
defaultCell.Vtgates[0] require.NotNil(t, vtgate) @@ -242,7 +240,6 @@ type numEvents struct { // tests the StopOnReshard flag func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID int) *numEvents { defaultCellName := "zone1" - allCellNames = "zone1" vc = NewVitessCluster(t, nil) require.NotNil(t, vc) @@ -251,7 +248,7 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) @@ -390,18 +387,14 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID // Validate that we can continue streaming from multiple keyspaces after first copying some tables and then resharding one of the keyspaces // Ensure that there are no missing row events during the resharding process. 
func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents { - defaultCellName := "zone1" - allCellNames = defaultCellName vc = NewVitessCluster(t, nil) - - require.NotNil(t, vc) ogdr := defaultReplicas defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func(dr int) { defaultReplicas = dr }(ogdr) defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) From 2e896dd81d5fdf8a30ddef0f501f3936b644c160 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 21:03:07 +0100 Subject: [PATCH 10/21] Combine vstream workflows into one Signed-off-by: Rohit Nayak --- ...lover.yml => cluster_endtoend_vstream.yml} | 10 +- ...r_endtoend_vstream_stoponreshard_false.yml | 148 ------------------ ...er_endtoend_vstream_stoponreshard_true.yml | 148 ------------------ ...dtoend_vstream_with_keyspaces_to_watch.yml | 148 ------------------ test/ci_workflow_gen.go | 5 +- test/config.json | 8 +- 6 files changed, 10 insertions(+), 457 deletions(-) rename .github/workflows/{cluster_endtoend_vstream_failover.yml => cluster_endtoend_vstream.yml} (94%) delete mode 100644 .github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml delete mode 100644 .github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml delete mode 100644 .github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream.yml similarity index 94% rename from .github/workflows/cluster_endtoend_vstream_failover.yml rename to .github/workflows/cluster_endtoend_vstream.yml index 8af2b0ad1bd..6d8d11bf912 100644 --- 
a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vstream_failover) +name: Cluster (vstream) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_failover)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vstream_failover) + name: Run endtoend tests on Cluster (vstream) runs-on: gh-hosted-runners-4cores-1 steps: @@ -65,7 +65,7 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_failover.yml' + - '.github/workflows/cluster_endtoend_vstream.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,7 +134,7 @@ jobs: set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vstream | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml deleted file mode 100644 index c3a39b76267..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_false) 
-on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_false)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_false) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end 
== 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml deleted file mode 100644 index 02979f5cbc8..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_true) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_true)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_true) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml deleted file mode 100644 index 449c49974db..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_with_keyspaces_to_watch) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_with_keyspaces_to_watch)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if 
[[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that 
we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 1bc76a2868a..f8f01558d36 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -80,10 +80,7 @@ var ( "21", "22", "mysql_server_vault", - "vstream_failover", - "vstream_stoponreshard_true", - "vstream_stoponreshard_false", - "vstream_with_keyspaces_to_watch", + "vstream", "onlineddl_ghost", "onlineddl_vrepl", "onlineddl_vrepl_stress", diff --git a/test/config.json b/test/config.json index cc68301a052..00bbb3f1aa3 100644 --- a/test/config.json +++ b/test/config.json @@ -1126,7 +1126,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamFailover"], "Command": [], "Manual": false, - "Shard": "vstream_failover", + "Shard": "vstream", "RetryMax": 3, "Tags": [] }, @@ -1135,7 +1135,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardTrue"], "Command": [], "Manual": false, - "Shard": 
"vstream_stoponreshard_true", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, @@ -1144,7 +1144,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardFalse"], "Command": [], "Manual": false, - "Shard": "vstream_stoponreshard_false", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, @@ -1153,7 +1153,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamWithKeyspacesToWatch"], "Command": [], "Manual": false, - "Shard": "vstream_with_keyspaces_to_watch", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, From e2e356a0036f93638c04c9feea2f5548f5b6ada7 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 12 Dec 2023 21:16:24 +0100 Subject: [PATCH 11/21] Reorganize vreplication workflows to combine into fewer. Remove MultiCell which has been deleted Signed-off-by: Rohit Nayak --- ...luster_endtoend_vreplication_multicell.yml | 170 ------------------ ...on_partial_movetables_and_materialize.yml} | 10 +- ...plication_partial_movetables_sequences.yml | 170 ------------------ test/ci_workflow_gen.go | 8 +- test/config.json | 19 +- 5 files changed, 13 insertions(+), 364 deletions(-) delete mode 100644 .github/workflows/cluster_endtoend_vreplication_multicell.yml rename .github/workflows/{cluster_endtoend_vreplication_partial_movetables_basic.yml => cluster_endtoend_vreplication_partial_movetables_and_materialize.yml} (95%) delete mode 100644 .github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml deleted file mode 100644 index 804e21fc042..00000000000 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ /dev/null @@ -1,170 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vreplication_multicell) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref 
}}, 'Cluster (vreplication_multicell)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vreplication_multicell) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_multicell.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 
- - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # Increase our open file descriptor limit as we could hit this - ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf - innodb_buffer_pool_dump_at_shutdown=OFF - innodb_buffer_pool_in_core_file=OFF - innodb_buffer_pool_load_at_startup=OFF - innodb_buffer_pool_size=64M - innodb_doublewrite=OFF - innodb_flush_log_at_trx_commit=0 - innodb_flush_method=O_DIRECT - innodb_numa_interleave=ON - innodb_adaptive_hash_index=OFF - sync_binlog=0 - sync_relay_log=0 - performance_schema=OFF - slow-query-log=OFF - EOF - - cat <<-EOF>>./config/mycnf/mysql80.cnf - binlog-transaction-compression=ON - EOF - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml similarity index 95% rename from .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml index 3a9af3b52b8..f748a2e60d1 100644 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vreplication_partial_movetables_basic) +name: Cluster (vreplication_partial_movetables_and_materialize) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_and_materialize)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic) + name: Run endtoend tests on Cluster (vreplication_partial_movetables_and_materialize) runs-on: gh-hosted-runners-4cores-1 steps: @@ -65,7 +65,7 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -156,7 +156,7 @@ jobs: EOF # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report 
-set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_and_materialize | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml deleted file mode 100644 index bad123c2ea4..00000000000 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml +++ /dev/null @@ -1,170 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vreplication_partial_movetables_sequences) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # Increase our open file descriptor limit as we could hit this - ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf - innodb_buffer_pool_dump_at_shutdown=OFF - innodb_buffer_pool_in_core_file=OFF - innodb_buffer_pool_load_at_startup=OFF - innodb_buffer_pool_size=64M - innodb_doublewrite=OFF - innodb_flush_log_at_trx_commit=0 - innodb_flush_method=O_DIRECT - innodb_numa_interleave=ON - innodb_adaptive_hash_index=OFF - sync_binlog=0 - sync_relay_log=0 - performance_schema=OFF - slow-query-log=OFF - EOF - - cat <<-EOF>>./config/mycnf/mysql80.cnf - binlog-transaction-compression=ON - EOF - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index f8f01558d36..c60076e9766 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -86,7 +86,6 @@ var ( "onlineddl_vrepl_stress", "onlineddl_vrepl_stress_suite", "onlineddl_vrepl_suite", - "vreplication_migrate_vdiff2_convert_tz", "onlineddl_revert", "onlineddl_scheduler", "tabletmanager_throttler_topo", @@ -113,13 +112,12 @@ var ( "xb_recovery", "mysql80", "vreplication_across_db_versions", - "vreplication_multicell", - "vreplication_cellalias", "vreplication_basic", + "vreplication_cellalias", "vreplication_v2", - "vreplication_partial_movetables_basic", - "vreplication_partial_movetables_sequences", + "vreplication_partial_movetables_and_materialize", "vreplication_foreign_key_stress", + "vreplication_migrate_vdiff2_convert_tz", "schemadiff_vrepl", "topo_connection_cache", "vtgate_partial_keyspace", diff --git a/test/config.json b/test/config.json index 00bbb3f1aa3..2a7b18fa299 100644 --- a/test/config.json +++ b/test/config.json @@ -1004,21 +1004,12 @@ "RetryMax": 1, "Tags": [] }, - "vreplication_multicell": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "MultiCell"], - "Command": [], - "Manual": false, - "Shard": "vreplication_multicell", - "RetryMax": 2, - "Tags": [] - }, "vreplication_materialize": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterialize"], "Command": [], "Manual": false, - "Shard": "vreplication_multicell", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1027,7 +1018,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterializeVtctldClient"], "Command": [], "Manual": false, - "Shard": "vreplication_multicell", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1045,7 +1036,7 @@ "Args": 
["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "PartialMoveTablesBasic"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_basic", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1054,7 +1045,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMultipleConcurrentVDiffs"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_basic", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1108,7 +1099,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestPartialMoveTablesWithSequences"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_sequences", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 1, "Tags": [] }, From 2aadb6a0be83e839caeb463c0b64c156bab4ad50 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 12:59:52 +0100 Subject: [PATCH 12/21] Refactor WaitForStatusOfTabletInShard usage Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 54 +++++++++++++++---- go/test/endtoend/vreplication/fk_ext_test.go | 8 --- go/test/endtoend/vreplication/fk_test.go | 13 +---- .../endtoend/vreplication/materialize_test.go | 13 +---- go/test/endtoend/vreplication/migrate_test.go | 12 +---- .../partial_movetables_seq_test.go | 5 -- .../endtoend/vreplication/performance_test.go | 3 -- .../resharding_workflows_v2_test.go | 26 --------- .../endtoend/vreplication/time_zone_test.go | 7 --- go/test/endtoend/vreplication/vdiff2_test.go | 11 +--- .../vdiff_multiple_movetables_test.go | 11 ---- .../vreplication/vreplication_test.go | 47 +++------------- .../vreplication/vschema_load_test.go | 9 +--- go/test/endtoend/vreplication/vstream_test.go | 18 ++----- 14 files changed, 62 insertions(+), 175 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go 
b/go/test/endtoend/vreplication/cluster_test.go index b9921c35d83..f63b6527d49 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -397,6 +397,7 @@ func NewVitessCluster(t *testing.T, opts *clusterOptions) *VitessCluster { vc.setupVtctl() vc.setupVtctlClient() vc.setupVtctldClient() + return vc } @@ -485,8 +486,14 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, cell.Keyspaces[ksName] = keyspace cellsToWatch = cellsToWatch + cell.Name } - require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) + for _, cell := range cells { + if len(cell.Vtgates) == 0 { + log.Infof("Starting vtgate") + vc.StartVtgate(t, cell, cellsToWatch) + } + } + require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) if schema != "" { if err := vc.VtctlClient.ApplySchema(ksName, schema); err != nil { t.Fatalf(err.Error()) @@ -499,12 +506,6 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, } } keyspace.VSchema = vschema - for _, cell := range cells { - if len(cell.Vtgates) == 0 { - log.Infof("Starting vtgate") - vc.StartVtgate(t, cell, cellsToWatch) - } - } err = vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", ksName) require.NoError(t, err) @@ -580,11 +581,11 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } } - arrNames := strings.Split(names, ",") - log.Infof("Addshards got %d shards with %+v", len(arrNames), arrNames) - isSharded := len(arrNames) > 1 + shardNames := strings.Split(names, ",") + log.Infof("Addshards got %d shards with %+v", len(shardNames), shardNames) + isSharded := len(shardNames) > 1 primaryTabletUID := 0 - for ind, shardName := range arrNames { + for ind, shardName := range shardNames { tabletID := tabletIDBase + ind*100 tabletIndex := 0 shard := &Shard{Name: shardName, IsSharded: isSharded, Tablets: 
make(map[string]*Tablet, 1)} @@ -695,6 +696,37 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa log.Infof("Finished creating shard %s", shard.Name) } + for _, shard := range shardNames { + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, keyspace.Name, shard)) + } + + waitTimeout := 30 * time.Second + vtgate := cells[0].Vtgates[0] + for _, shard := range keyspace.Shards { + numReplicas, numRDOnly := 0, 0 + for _, tablet := range shard.Tablets { + switch strings.ToLower(tablet.Vttablet.TabletType) { + case "replica": + numReplicas++ + case "rdonly": + numRDOnly++ + } + } + numReplicas-- // account for primary, which also has replica type + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, waitTimeout); err != nil { + return err + } + if numReplicas > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), numReplicas, waitTimeout); err != nil { + return err + } + } + if numRDOnly > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), numRDOnly, waitTimeout); err != nil { + return err + } + } + } err := vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", keyspace.Name) require.NoError(t, err) diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index 20cd8f170ed..ef04f0e6996 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -109,14 +109,6 @@ func TestFKExt(t *testing.T) { sourceKeyspace := fkextConfig.sourceKeyspaceName vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, "0", FKExtSourceVSchema, FKExtSourceSchema, 0, 0, 100, nil) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, "0") - require.NoError(t, err) - require.NoError(t, 
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, "0"), 1, shardStatusWaitTimeout)) - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) lg = &SimpleLoadGenerator{} diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index ba60fec0e48..a313de09488 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vttablet" @@ -59,19 +58,10 @@ func TestFKWorkflow(t *testing.T) { cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) + insertInitialFKData(t) var ls *fkLoadSimulator - - insertInitialFKData(t) withLoad := true // Set it to false to skip load simulation, while debugging var cancel context.CancelFunc var ctx context.Context @@ -90,7 +80,6 @@ func TestFKWorkflow(t *testing.T) { targetKeyspace := "fktarget" targetTabletId := 200 vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) workflowName := "fk" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) diff --git 
a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index d55a71d33aa..385afb02d1b 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -20,8 +20,6 @@ import ( "testing" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) const smSchema = ` @@ -63,10 +61,10 @@ const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters func testShardedMaterialize(t *testing.T, useVtctldClient bool) { + var err error vc = NewVitessCluster(t, nil) ks1 := "ks1" ks2 := "ks2" - shard := "0" require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() @@ -76,12 +74,8 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, ks1, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, ks2, shard) - require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -178,6 +172,7 @@ RETURN id * length(val); ` func testMaterialize(t *testing.T, useVtctldClient bool) { + var err error vc = NewVitessCluster(t, nil) sourceKs := "source" targetKs := "target" @@ -192,12 +187,8 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, 
defaultReplicas, defaultRdonly, 300, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 37917a8485f..727dab4f69a 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -25,8 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -57,8 +55,6 @@ func TestVtctlMigrate(t *testing.T) { defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") @@ -67,7 +63,7 @@ func TestVtctlMigrate(t *testing.T) { verifyClusterHealth(t, vc) insertInitialData(t) t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, "product", 2) + testVStreamFrom(t, vtgate, "product", 2) }) // create external cluster @@ -80,8 +76,6 @@ func TestVtctlMigrate(t *testing.T) { extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - 
require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) @@ -182,8 +176,6 @@ func TestVtctldMigrate(t *testing.T) { _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") @@ -207,8 +199,6 @@ func TestVtctldMigrate(t *testing.T) { extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index d2dfaf4d857..9491363aed2 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -20,7 +20,6 @@ import ( "fmt" "strings" "testing" - "time" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -208,10 +207,6 @@ func (tc *vrepTestCase) setupKeyspace(ks *keyspace) { tc.vtgate = defaultCell.Vtgates[0] } - for _, shard := range ks.shards { - require.NoError(t, cluster.WaitForHealthyShard(tc.vc.VtctldClient, ks.name, shard)) - require.NoError(t, tc.vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks.name, shard), 1, 30*time.Second)) - } } func (tc *vrepTestCase) newWorkflow(typ, workflowName, fromKeyspace, toKeyspace string, 
options *workflowOptions) *workflow { diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index c03edc4c0b5..75fb900fd91 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -58,9 +58,6 @@ create table customer(cid int, name varbinary(128), meta json default null, typ vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 450e53b31b4..546ca0f4ae3 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -628,13 +628,6 @@ func setupCluster(t *testing.T) *VitessCluster { vc.AddKeyspace(t, []*Cell{zone1, zone2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second)) - defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) @@ -651,12 +644,6 @@ func setupCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, 
"customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second)) defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet @@ -672,11 +659,6 @@ func setupCustomer2Keyspace(t *testing.T) { customerVSchema, customerSchema, 0, 0, 1200, nil); err != nil { t.Fatal(err) } - for _, c2shard := range c2shards { - err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard) - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second)) - } } func setupMinimalCluster(t *testing.T) *VitessCluster { @@ -690,8 +672,6 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { vtgate = zone1.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) verifyClusterHealth(t, vc) @@ -707,10 +687,6 @@ func setupMinimalCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema, 0, 0, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, 
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet @@ -837,8 +813,6 @@ func createAdditionalCustomerShards(t *testing.T, shards string) { arrTargetShardNames := strings.Split(shards, ",") for _, shardName := range arrTargetShardNames { - err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) - require.NoError(t, err) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second)) require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second)) } diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 144565fe3d2..ff334c593fe 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -35,7 +35,6 @@ func TestMoveTablesTZ(t *testing.T) { workflow := "tz" sourceKs := "product" targetKs := "customer" - shard := "0" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) @@ -47,9 +46,6 @@ func TestMoveTablesTZ(t *testing.T) { cell1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -84,9 +80,6 @@ func TestMoveTablesTZ(t *testing.T) { if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } - err = 
cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index cd64e625611..d2f20b5204e 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -29,7 +29,6 @@ import ( "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/sqlparser" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -135,9 +134,6 @@ func TestVDiff2(t *testing.T) { vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - for _, shard := range sourceShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)) - } vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -159,9 +155,6 @@ func TestVDiff2(t *testing.T) { // We ONLY add primary tablets in this test. 
tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) require.NoError(t, err) - for _, shard := range targetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)) - } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -175,9 +168,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) - for _, shard := range arrTargetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, tc.targetKs, shard)) - } + } ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) var args []string diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 2ceb11677f5..a4c25941801 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -43,21 +42,11 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - 
defer vtgateConn.Close() verifyClusterHealth(t, vc) - insertInitialData(t) targetTabletId := 200 targetKeyspace := "customer" vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) index := 1000 var loadCtx context.Context diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 7cb892cefb7..0e44f1037ae 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -118,6 +118,7 @@ func throttlerCheckSelf(tablet *cluster.VttabletProcess, throttlerApp throttlera // NOTE: this is a manual test. It is not executed in the // CI. func TestVReplicationDDLHandling(t *testing.T) { + var err error workflow := "onddl_test" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) table := "orders" @@ -136,10 +137,7 @@ func TestVReplicationDDLHandling(t *testing.T) { } vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) + verifyClusterHealth(t, vc) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -251,10 +249,6 @@ func TestVreplicationCopyThrottling(t *testing.T) { } vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) // Confirm that the initial copy table phase does not proceed until the source tablet(s) // have an InnoDB History List length that is less than specified in the tablet's config. 
@@ -297,6 +291,7 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { + var err error defaultCellName := "zone1" vc = NewVitessCluster(t, nil) // Keep the cluster processes minimal to deal with CI resource constraints @@ -314,8 +309,6 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -437,10 +430,6 @@ func TestVStreamFlushBinlog(t *testing.T) { } vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) verifyClusterHealth(t, vc) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -535,7 +524,7 @@ func testVStreamCellFlag(t *testing.T) { flags.CellPreference = "onlyspecified" } - ctx2, cancel := context.WithTimeout(ctx, 30*time.Second) + ctx2, cancel := context.WithTimeout(ctx, 10*time.Second) reader, err := conn.VStream(ctx2, topodatapb.TabletType_REPLICA, vgtid, filter, flags) require.NoError(t, err) @@ -598,20 +587,12 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { result, err := vc.VtctlClient.ExecuteCommandWithOutput("AddCellsAlias", "--", "--cells", "zone2", "alias") require.NoError(t, err, "command failed with output: %v", result) - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err = 
cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) - insertInitialData(t) + vtgate = cell1.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, keyspace, 2) + testVStreamFrom(t, vtgate, keyspace, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) isTableInDenyList(t, vc, "product:0", "customer") @@ -620,7 +601,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { } // testVStreamFrom confirms that the "vstream * from" endpoint is serving data -func testVStreamFrom(t *testing.T, table string, expectedRowCount int) { +func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, expectedRowCount int) { ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -692,11 +673,6 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } - err := cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "-80") - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "80-") - require.NoError(t, err) - // Assume we are operating on first cell defaultCell := cells[0] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] @@ -1025,12 +1001,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou ksWorkflow := ksName + "." 
+ workflow keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts)) - arrTargetShardNames := strings.Split(targetShards, ",") - for _, shardName := range arrTargetShardNames { - err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) - require.NoError(t, err) - } tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary") // Test multi-primary setups, like a Galera cluster, which have auto increment steps > 1. @@ -1130,10 +1101,6 @@ func shardMerchant(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil { t.Fatal(err) } - err := cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "-80") - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "80-") - require.NoError(t, err) moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace] merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index 5b0a2548dfb..6ca8dcfe472 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -45,12 +44,8 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) - vtgate = 
defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + + vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() // ch is used to signal that there is significant data inserted into the tables and when a lot of vschema changes have been applied diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index e7d19947cb2..506c3a543bf 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -52,16 +51,11 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) insertInitialData(t) + vtgate = defaultCell.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, "product", 2) + testVStreamFrom(t, vtgate, "product", 2) }) ctx := context.Background() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) @@ -90,6 +84,9 @@ func testVStreamWithFailover(t 
*testing.T, failover bool) { stopInserting := false id := 0 + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + // first goroutine that keeps inserting rows into table being streamed until some time elapses after second PRS go func() { for { @@ -252,8 +249,6 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "unsharded", "0") - require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -396,9 +391,6 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() From 6dcc94e585ea20c29bec503351beb6c85cf133dd Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 15:50:14 +0100 Subject: [PATCH 13/21] Fix failing workflows Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 6 ++++-- go/test/endtoend/vreplication/fk_ext_test.go | 8 +------- .../endtoend/vreplication/resharding_workflows_v2_test.go | 6 ------ go/test/endtoend/vreplication/vstream_test.go | 2 +- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index f63b6527d49..86bf4e9727a 100644 --- 
a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -702,7 +702,8 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa waitTimeout := 30 * time.Second vtgate := cells[0].Vtgates[0] - for _, shard := range keyspace.Shards { + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] numReplicas, numRDOnly := 0, 0 for _, tablet := range shard.Tablets { switch strings.ToLower(tablet.Vttablet.TabletType) { @@ -732,7 +733,8 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa require.NoError(t, err) log.Infof("Waiting for throttler config to be applied on all shards") - for _, shard := range keyspace.Shards { + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] for _, tablet := range shard.Tablets { clusterTablet := &cluster.Vttablet{ Alias: tablet.Name, diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index ef04f0e6996..401b99360d8 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -151,7 +151,6 @@ func TestFKExt(t *testing.T) { require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, threeShards, numReplicas, 0, tabletID, nil)) tablets := make(map[string]*cluster.VttabletProcess) for i, shard := range strings.Split(threeShards, ",") { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), numReplicas, shardStatusWaitTimeout)) tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID+i*100)].Vttablet } sqls := strings.Split(FKExtSourceSchema, "\n") @@ -167,7 +166,6 @@ func TestFKExt(t *testing.T) { shard := "0" require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, shard, numReplicas, 0, tabletID, nil)) tablets := make(map[string]*cluster.VttabletProcess) - 
require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), numReplicas, shardStatusWaitTimeout)) tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID)].Vttablet sqls := strings.Split(FKExtSourceSchema, "\n") for _, sql := range sqls { @@ -341,13 +339,9 @@ func moveKeyspace(t *testing.T) { func newKeyspace(t *testing.T, keyspaceName, shards, vschema, schema string, tabletId, numReplicas int) map[string]*cluster.VttabletProcess { tablets := make(map[string]*cluster.VttabletProcess) - cellName := fkextConfig.cell cell := vc.Cells[fkextConfig.cell] + vtgate := cell.Vtgates[0] vc.AddKeyspace(t, []*Cell{cell}, keyspaceName, shards, vschema, schema, numReplicas, 0, tabletId, nil) - for i, shard := range strings.Split(shards, ",") { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, shardStatusWaitTimeout)) - tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletId+i*100)].Vttablet - } err := vc.VtctldClient.ExecuteCommand("RebuildVSchemaGraph") require.NoError(t, err) require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "parent", "id")) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 546ca0f4ae3..f6260a190b8 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -810,12 +810,6 @@ func createAdditionalCustomerShards(t *testing.T, shards string) { defaultCell := vc.Cells[vc.CellNames[0]] keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - arrTargetShardNames := strings.Split(shards, ",") - - for _, 
shardName := range arrTargetShardNames { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second)) - } custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 506c3a543bf..2a00ea1bb9c 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -43,11 +43,11 @@ import ( // - We stream only from the primary and while streaming we reparent to a replica and then back to the original primary func testVStreamWithFailover(t *testing.T, failover bool) { vc = NewVitessCluster(t, nil) + defer vc.TearDown() require.NotNil(t, vc) defaultReplicas = 2 defaultRdonly = 0 - defer vc.TearDown() defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) From 4a8f428b833709a54c2c68a19ffe9134f2f52ec6 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 18:41:14 +0100 Subject: [PATCH 14/21] Fix incorrect change to SQLFuture.java Signed-off-by: Rohit Nayak --- java/client/src/main/java/io/vitess/client/SQLFuture.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/java/client/src/main/java/io/vitess/client/SQLFuture.java b/java/client/src/main/java/io/vitess/client/SQLFuture.java index 5804b7079ad..0fcfb4521bc 100644 --- a/java/client/src/main/java/io/vitess/client/SQLFuture.java +++ b/java/client/src/main/java/io/vitess/client/SQLFuture.java @@ -38,7 +38,7 @@ *

* For users who want to get results synchronously, we provide {@link #checkedGet()} as a * convenience method. Unlike {@link #get()}, it throws only {@code SQLException}, so e.g. {@code - * vtgateConn2.execute(...).checkedGet()} behaves the same as our old synchronous API. + * vtgateConn.execute(...).checkedGet()} behaves the same as our old synchronous API. * *

* The additional methods are similar to the {@code CheckedFuture} interface (marked as beta), but @@ -60,7 +60,7 @@ public SQLFuture(ListenableFuture delegate) { * *

* This can be used to effectively turn the Vitess client into a synchronous API. For example: - * {@code Cursor cursor = vtgateConn2.execute(...).checkedGet();} + * {@code Cursor cursor = vtgateConn.execute(...).checkedGet();} */ public V checkedGet() throws SQLException { try { @@ -78,7 +78,7 @@ public V checkedGet() throws SQLException { * *

* This can be used to effectively turn the Vitess client into a synchronous API. For example: - * {@code Cursor cursor = vtgateConn2.execute(...).checkedGet();} + * {@code Cursor cursor = vtgateConn.execute(...).checkedGet();} */ public V checkedGet(long timeout, TimeUnit unit) throws TimeoutException, SQLException { try { From 6ea23d38144be4e271d6bf1d7774c691f7ce5e04 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 18:53:40 +0100 Subject: [PATCH 15/21] Remove couple of global variables Signed-off-by: Rohit Nayak --- .../endtoend/vreplication/resharding_workflows_v2_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index f6260a190b8..703b2cbd719 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -40,9 +40,7 @@ const ( targetKs = "customer" ksWorkflow = targetKs + "." + workflowName reverseKsWorkflow = sourceKs + "." 
+ workflowName + "_reverse" - tablesToMove = "customer" defaultCellName = "zone1" - readQuery = "select cid from customer" ) const ( @@ -84,7 +82,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { - tables = tablesToMove + tables = "customer" } err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) @@ -97,7 +95,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", defaultWorkflowExecOptions) + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, @@ -232,6 +230,7 @@ func validateReadsRoute(t *testing.T, tabletTypes string, tablet *cluster.Vttabl for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { + readQuery := "select * from customer" assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery) } } From 6f7577a88adffaa831ba526978642638a4e7939f Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 19:09:24 +0100 Subject: [PATCH 16/21] Remove vtgate global variable Signed-off-by: Rohit Nayak --- .../endtoend/vreplication/materialize_test.go | 4 ---- go/test/endtoend/vreplication/migrate_test.go | 4 +--- .../endtoend/vreplication/performance_test.go | 4 ---- .../resharding_workflows_v2_test.go | 4 ---- go/test/endtoend/vreplication/vdiff2_test.go | 6 +----- .../endtoend/vreplication/vreplication_test.go | 17 +++++++---------- 
go/test/endtoend/vreplication/vstream_test.go | 6 +----- 7 files changed, 10 insertions(+), 35 deletions(-) diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index 385afb02d1b..486692a58ba 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -72,8 +72,6 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { defer vc.TearDown() defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil) @@ -185,8 +183,6 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 727dab4f69a..5d927054000 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -55,7 +55,7 @@ func TestVtctlMigrate(t *testing.T) { defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") vtgateConn := 
getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -176,8 +176,6 @@ func TestVtctldMigrate(t *testing.T) { _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate, "failed to get vtgate") vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index 75fb900fd91..6940665c842 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -23,8 +23,6 @@ import ( "time" "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/require" ) func TestReplicationStress(t *testing.T) { @@ -55,8 +53,6 @@ create table customer(cid int, name varbinary(128), meta json default null, typ defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 703b2cbd719..d18f872f6fe 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -669,10 +669,6 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, 0, 0, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - require.NoError(t, 
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) - verifyClusterHealth(t, vc) insertInitialData(t) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index d2f20b5204e..7719de0fab1 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -125,17 +125,13 @@ func TestVDiff2(t *testing.T) { zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] zone3 := vc.Cells["zone3"] - defaultCell := zone1 // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) require.NoError(t, err) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 0e44f1037ae..6f73f84d30b 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -51,7 +51,6 @@ import ( var ( vc *VitessCluster - vtgate *cluster.VtgateProcess defaultRdonly int defaultReplicas int sourceKsOpts = make(map[string]string) @@ -135,7 +134,7 @@ func TestVReplicationDDLHandling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) verifyClusterHealth(t, vc) @@ -247,7 +246,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, 
nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) // Confirm that the initial copy table phase does not proceed until the source tablet(s) @@ -307,7 +306,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string defaultCell := vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -428,12 +427,8 @@ func TestVStreamFlushBinlog(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) verifyClusterHealth(t, vc) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) insertInitialData(t) @@ -449,7 +444,9 @@ func TestVStreamFlushBinlog(t *testing.T) { // Generate a lot of binlog event bytes targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 1024 - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', '%s', now())" for i := 100; i < 10000; i++ { randStr, err := randHex(6500) @@ -590,7 +587,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { verifyClusterHealth(t, vc) insertInitialData(t) - vtgate = cell1.Vtgates[0] + vtgate := cell1.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { testVStreamFrom(t, vtgate, keyspace, 2) }) diff --git a/go/test/endtoend/vreplication/vstream_test.go 
b/go/test/endtoend/vreplication/vstream_test.go index 2a00ea1bb9c..1ddcc42377c 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -53,7 +53,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { testVStreamFrom(t, vtgate, "product", 2) }) @@ -247,11 +247,7 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) // some initial data From 44de1037705d9cbc64e24440d4778e10779763d9 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 13 Dec 2023 19:21:17 +0100 Subject: [PATCH 17/21] Minor mods Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/helper_test.go | 2 -- go/test/endtoend/vreplication/movetables_buffering_test.go | 1 - .../endtoend/vreplication/partial_movetables_seq_test.go | 7 +++---- .../endtoend/vreplication/resharding_workflows_v2_test.go | 1 - 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index f221175dc9c..54d057fe6e9 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -795,8 +795,6 @@ func (lg *loadGenerator) stop() { log.Infof("Canceling load") lg.cancel() time.Sleep(loadTestWaitForCancel) 
// wait for cancel to take effect - log.Flush() - } func (lg *loadGenerator) start() { diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index a9199423012..e853022bfd4 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -40,5 +40,4 @@ func TestMoveTablesBuffering(t *testing.T) { lg.stop() log.Infof("TestMoveTablesBuffering: done") - log.Flush() } diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index 9491363aed2..1b8cd3c4a56 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -74,7 +74,7 @@ type vrepTestCase struct { vtgate *cluster.VtgateProcess } -func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCase { +func initPartialMoveTablesComplexTestCase(t *testing.T) *vrepTestCase { const ( seqVSchema = `{ "sharded": false, @@ -121,7 +121,7 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa ) tc := &vrepTestCase{ t: t, - testName: name, + testName: t.Name(), keyspaces: make(map[string]*keyspace), defaultCellName: "zone1", workflows: make(map[string]*workflow), @@ -283,7 +283,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { extraVTGateArgs = origExtraVTGateArgs }() - tc := initPartialMoveTablesComplexTestCase(t, "TestPartialMoveTablesComplex") + tc := initPartialMoveTablesComplexTestCase(t) defer tc.teardown() var err error @@ -346,7 +346,6 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { }) currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") - log.Flush() // This query uses an ID that should always get routed to shard 80- shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" diff --git 
a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index d18f872f6fe..e44f856c223 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -309,7 +309,6 @@ func TestBasicV2Workflows(t *testing.T) { testMoveTablesV2Workflow(t) testReshardV2Workflow(t) - log.Flush() } func getVtctldGRPCURL() string { From 08339d9dfe6f634763b8e9d20d9806aac988d354 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Fri, 15 Dec 2023 00:08:35 +0100 Subject: [PATCH 18/21] Minor mods Signed-off-by: Rohit Nayak --- .../partial_movetables_seq_test.go | 16 ++++++------- .../vreplication/partial_movetables_test.go | 24 +++++++++---------- .../vreplication/vreplication_test.go | 4 +--- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index 1b8cd3c4a56..bb354a5ec01 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -348,9 +348,9 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // Reset any existing vtgate connection state. 
closeConn() @@ -372,14 +372,14 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") @@ -413,22 +413,22 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { t.Run("Validate shard and tablet type routing", func(t *testing.T) { // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) 
require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 3f68841cb42..877df230ce6 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -183,9 +183,9 @@ func TestPartialMoveTablesBasic(t *testing.T) { } // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // reset any existing vtgate connection state vtgateConn.Close() @@ -206,14 +206,14 @@ func TestPartialMoveTablesBasic(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") 
log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") @@ -237,40 +237,40 @@ func TestPartialMoveTablesBasic(t *testing.T) { defer vtgateConn.Close() // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") // Tablet type targeting _, err = vtgateConn.ExecuteFetch("use 
`customer2@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 6f73f84d30b..99642889742 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -293,6 +293,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string var err error defaultCellName := "zone1" vc = NewVitessCluster(t, nil) + defer vc.TearDown() // Keep the cluster processes minimal to deal with CI resource constraints defaultReplicas = 0 defaultRdonly = 0 @@ -302,12 +303,9 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string 
require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) } - defer vc.TearDown() defaultCell := vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - vtgate := defaultCell.Vtgates[0] - require.NotNil(t, vtgate) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() From 7942f39785291418492d0dfc01c170a4b6388bd3 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Sun, 17 Dec 2023 19:14:46 +0100 Subject: [PATCH 19/21] Added a comment Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 86bf4e9727a..234d820d748 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -683,6 +683,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa if err := tablet.Vttablet.Setup(); err != nil { t.Fatalf(err.Error()) } + // Set time_zone to UTC for all tablets. Without this it fails locally on some MacOS setups. 
query := "SET GLOBAL time_zone = '+00:00';" qr, err := tablet.Vttablet.QueryTablet(query, tablet.Vttablet.Keyspace, false) if err != nil { From 4ffe54e37d993424cd13818f1c4129137c15d817 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Tue, 19 Dec 2023 12:52:22 +0100 Subject: [PATCH 20/21] Address review comments Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/cluster_test.go | 8 ++++++-- go/test/endtoend/vreplication/vreplication_test.go | 6 +++--- go/test/endtoend/vreplication/vstream_test.go | 1 + 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 234d820d748..7d22d063945 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -89,14 +89,18 @@ type ClusterConfig struct { vreplicationCompressGTID bool } -func (cc *ClusterConfig) compressGTID() func() { +// enableGTIDCompression enables GTID compression for the cluster and returns a function +// that can be used to disable it in a defer. +func (cc *ClusterConfig) enableGTIDCompression() func() { cc.vreplicationCompressGTID = true return func() { cc.vreplicationCompressGTID = false } } -func setVTTabletExperimentalFlags() func() { +// setAllVTTabletExperimentalFlags sets all the experimental flags for vttablet and returns a function +// that can be used to reset them in a defer. 
+func setAllVTTabletExperimentalFlags() func() { experimentalArgs := fmt.Sprintf("--vreplication_experimental_flags=%d", vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching) oldArgs := extraVTTabletArgs diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 99642889742..4e50ea12af3 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -269,7 +269,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { } func TestBasicVreplicationWorkflow(t *testing.T) { - defer setVTTabletExperimentalFlags() + defer setAllVTTabletExperimentalFlags() sourceKsOpts["DBTypeVersion"] = "mysql-8.0" targetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") @@ -563,8 +563,8 @@ func testVStreamCellFlag(t *testing.T) { // We also reuse the setup of this test to validate that the "vstream * from" vtgate query functionality is functional func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - defer mainClusterConfig.compressGTID() - defer setVTTabletExperimentalFlags() + defer mainClusterConfig.enableGTIDCompression() + defer setAllVTTabletExperimentalFlags() vc = NewVitessCluster(t, &clusterOptions{cells: cells}) defer vc.TearDown() diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 1ddcc42377c..8b21cf6fb60 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -215,6 +215,7 @@ const vschemaSharded = ` func insertRow(keyspace, table string, id int) { vtgateConn := getConnectionNoError(vc.t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + // Due to race conditions this call is sometimes made after vtgates have shutdown. 
In that case just return. if vtgateConn == nil { return } From 331291edac6c1f4779856db37d6e653cbb914112 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Wed, 27 Dec 2023 12:52:47 +0100 Subject: [PATCH 21/21] Fix merge issue Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/vdiff2_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 7719de0fab1..e719911a63b 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -144,8 +144,8 @@ func TestVDiff2(t *testing.T) { generateMoreCustomers(t, sourceKs, 1000) // Create rows in the nopk table using the customer names and random ages between 20 and 100. - _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s.nopk(name, age) select name, floor(rand()*80)+20 from %s.customer", sourceKs, sourceKs), -1, false) - require.NoError(t, err, "failed to insert rows into nopk table: %v", err) + query = "insert into nopk(name, age) select name, floor(rand()*80)+20 from customer" + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test.