From 1ee8f288d6d138ca1a61e19126ca8fe63165f8af Mon Sep 17 00:00:00 2001 From: Rohit Nayak <57520317+rohit-nayak-ps@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:41:37 +0100 Subject: [PATCH] Some VReplication e2e Refactoring (#14735) Signed-off-by: Rohit Nayak --- ...luster_endtoend_vreplication_multicell.yml | 170 -------------- ...on_partial_movetables_and_materialize.yml} | 10 +- ...plication_partial_movetables_sequences.yml | 170 -------------- ...lover.yml => cluster_endtoend_vstream.yml} | 10 +- ...r_endtoend_vstream_stoponreshard_false.yml | 148 ------------ ...er_endtoend_vstream_stoponreshard_true.yml | 148 ------------ ...dtoend_vstream_with_keyspaces_to_watch.yml | 148 ------------ go/test/endtoend/vreplication/cluster_test.go | 164 ++++++++++--- go/test/endtoend/vreplication/fk_ext_test.go | 31 +-- go/test/endtoend/vreplication/fk_test.go | 28 +-- go/test/endtoend/vreplication/helper_test.go | 29 ++- .../vreplication/initial_data_test.go | 14 ++ .../endtoend/vreplication/materialize_test.go | 40 +--- go/test/endtoend/vreplication/migrate_test.go | 55 ++--- .../vreplication/movetables_buffering_test.go | 4 +- .../partial_movetables_seq_test.go | 48 ++-- .../vreplication/partial_movetables_test.go | 29 +-- .../endtoend/vreplication/performance_test.go | 20 +- .../resharding_workflows_v2_test.go | 91 +++----- .../endtoend/vreplication/sidecardb_test.go | 13 +- .../endtoend/vreplication/time_zone_test.go | 22 +- go/test/endtoend/vreplication/vdiff2_test.go | 35 ++- .../vreplication/vdiff_helper_test.go | 4 +- .../vdiff_multiple_movetables_test.go | 25 +- .../vreplication/vreplication_test.go | 220 +++++++----------- .../vreplication/vschema_load_test.go | 21 +- go/test/endtoend/vreplication/vstream_test.go | 104 ++++----- .../vreplication/vplayer_flaky_test.go | 18 +- test/ci_workflow_gen.go | 13 +- test/config.json | 27 +-- 30 files changed, 482 insertions(+), 1377 deletions(-) delete mode 100644 .github/workflows/cluster_endtoend_vreplication_multicell.yml rename .github/workflows/{cluster_endtoend_vreplication_partial_movetables_basic.yml => cluster_endtoend_vreplication_partial_movetables_and_materialize.yml} (95%) delete mode 100644 .github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml rename .github/workflows/{cluster_endtoend_vstream_failover.yml => cluster_endtoend_vstream.yml} (94%) delete mode 100644 .github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml delete mode 100644 .github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml delete mode 100644 .github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml deleted file mode 100644 index 804e21fc042..00000000000 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ /dev/null @@ -1,170 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vreplication_multicell) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_multicell)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vreplication_multicell) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( 
github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_multicell.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # Increase our open file descriptor limit as we could hit this - ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf - innodb_buffer_pool_dump_at_shutdown=OFF - innodb_buffer_pool_in_core_file=OFF - innodb_buffer_pool_load_at_startup=OFF - innodb_buffer_pool_size=64M - innodb_doublewrite=OFF - innodb_flush_log_at_trx_commit=0 - innodb_flush_method=O_DIRECT - innodb_numa_interleave=ON - innodb_adaptive_hash_index=OFF - sync_binlog=0 - sync_relay_log=0 - performance_schema=OFF - slow-query-log=OFF - EOF - - cat <<-EOF>>./config/mycnf/mysql80.cnf - binlog-transaction-compression=ON - EOF - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml similarity index 95% rename from .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml index 3a9af3b52b8..f748a2e60d1 100644 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vreplication_partial_movetables_basic) +name: Cluster (vreplication_partial_movetables_and_materialize) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_and_materialize)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic) + name: Run endtoend tests on Cluster (vreplication_partial_movetables_and_materialize) runs-on: gh-hosted-runners-4cores-1 steps: @@ -65,7 +65,7 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -156,7 +156,7 @@ jobs: EOF # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_and_materialize | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml deleted file mode 100644 index bad123c2ea4..00000000000 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml +++ /dev/null @@ -1,170 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vreplication_partial_movetables_sequences) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - 
exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # Increase our open file descriptor limit as we could hit this - ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf - innodb_buffer_pool_dump_at_shutdown=OFF - innodb_buffer_pool_in_core_file=OFF - innodb_buffer_pool_load_at_startup=OFF - innodb_buffer_pool_size=64M - innodb_doublewrite=OFF - innodb_flush_log_at_trx_commit=0 - innodb_flush_method=O_DIRECT - innodb_numa_interleave=ON - innodb_adaptive_hash_index=OFF - sync_binlog=0 - sync_relay_log=0 - performance_schema=OFF - slow-query-log=OFF - EOF - - cat <<-EOF>>./config/mycnf/mysql80.cnf - binlog-transaction-compression=ON - EOF - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream.yml similarity index 94% rename from .github/workflows/cluster_endtoend_vstream_failover.yml rename to .github/workflows/cluster_endtoend_vstream.yml index 8af2b0ad1bd..6d8d11bf912 100644 --- a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vstream_failover) +name: Cluster (vstream) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_failover)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vstream_failover) + name: Run endtoend tests on Cluster (vstream) runs-on: gh-hosted-runners-4cores-1 steps: @@ -65,7 +65,7 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_failover.yml' + - '.github/workflows/cluster_endtoend_vstream.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,7 +134,7 @@ jobs: set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vstream | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml deleted file mode 100644 index c3a39b76267..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_false) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_false)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_false) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml deleted file mode 100644 index 02979f5cbc8..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_true) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_true)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_true) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml deleted file mode 100644 index 449c49974db..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_with_keyspaces_to_watch) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_with_keyspaces_to_watch)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.5 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 8993f1257da..7d22d063945 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/vttablet" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/mysqlctl" @@ -87,11 +89,33 @@ type ClusterConfig struct { vreplicationCompressGTID bool } +// enableGTIDCompression enables GTID compression for the cluster and returns a function +// that can be used to disable it in a defer. +func (cc *ClusterConfig) enableGTIDCompression() func() { + cc.vreplicationCompressGTID = true + return func() { + cc.vreplicationCompressGTID = false + } +} + +// setAllVTTabletExperimentalFlags sets all the experimental flags for vttablet and returns a function +// that can be used to reset them in a defer. 
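+//
+// Illustrative call site (not taken from this patch):
+//
+//	defer setAllVTTabletExperimentalFlags()()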
+func setAllVTTabletExperimentalFlags() func() { + experimentalArgs := fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching) + oldArgs := extraVTTabletArgs + extraVTTabletArgs = append(extraVTTabletArgs, experimentalArgs) + return func() { + extraVTTabletArgs = oldArgs + } +} + // VitessCluster represents all components within the test cluster type VitessCluster struct { t *testing.T ClusterConfig *ClusterConfig Name string + CellNames []string Cells map[string]*Cell Topo *cluster.TopoProcess Vtctld *cluster.VtctldProcess @@ -115,6 +139,9 @@ type Keyspace struct { VSchema string Schema string SidecarDBName string + + numReplicas int + numRDOnly int } // Shard represents a Vitess shard in a keyspace @@ -332,9 +359,28 @@ func init() { externalClusterConfig = getClusterConfig(1, mainVtDataRoot+"/ext") } +type clusterOptions struct { + cells []string + clusterConfig *ClusterConfig +} + +func getClusterOptions(opts *clusterOptions) *clusterOptions { + if opts == nil { + opts = &clusterOptions{} + } + if opts.cells == nil { + opts.cells = []string{"zone1"} + } + if opts.clusterConfig == nil { + opts.clusterConfig = mainClusterConfig + } + return opts +} + // NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo -func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { - vc := &VitessCluster{t: t, Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} +func NewVitessCluster(t *testing.T, opts *clusterOptions) *VitessCluster { + opts = getClusterOptions(opts) + vc := &VitessCluster{t: t, Name: t.Name(), CellNames: opts.cells, Cells: make(map[string]*Cell), ClusterConfig: opts.clusterConfig} require.NotNil(t, vc) vc.CleanupDataroot(t, true) @@ -346,32 +392,46 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf err := topo.ManageTopoDir("mkdir", "/vitess/global") require.NoError(t, err) vc.Topo = topo - for _, cellName := range cellNames { + for _, cellName := range opts.cells { err := topo.ManageTopoDir("mkdir", "/vitess/"+cellName) require.NoError(t, err) } - vtctld := cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, + vc.setupVtctld() + vc.setupVtctl() + vc.setupVtctlClient() + vc.setupVtctldClient() + + return vc +} + +func (vc *VitessCluster) setupVtctld() { + vc.Vtctld = cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir) - vc.Vtctld = vtctld - require.NotNil(t, vc.Vtctld) + require.NotNil(vc.t, vc.Vtctld) // use first cell as `-cell` - vc.Vtctld.Setup(cellNames[0], extraVtctldArgs...) + vc.Vtctld.Setup(vc.CellNames[0], extraVtctldArgs...) 
+} +func (vc *VitessCluster) setupVtctl() { vc.Vtctl = cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname) - require.NotNil(t, vc.Vtctl) - for _, cellName := range cellNames { + require.NotNil(vc.t, vc.Vtctl) + for _, cellName := range vc.CellNames { vc.Vtctl.AddCellInfo(cellName) - cell, err := vc.AddCell(t, cellName) - require.NoError(t, err) - require.NotNil(t, cell) + cell, err := vc.AddCell(vc.t, cellName) + require.NoError(vc.t, err) + require.NotNil(vc.t, cell) } +} +func (vc *VitessCluster) setupVtctlClient() { vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctlClient) + require.NotNil(vc.t, vc.VtctlClient) +} + +func (vc *VitessCluster) setupVtctldClient() { vc.VtctldClient = cluster.VtctldClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctldClient) - return vc + require.NotNil(vc.t, vc.VtctldClient) } // CleanupDataroot deletes the vtdataroot directory. Since we run multiple tests sequentially in a single CI test shard, @@ -430,8 +490,14 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, cell.Keyspaces[ksName] = keyspace cellsToWatch = cellsToWatch + cell.Name } - require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) + for _, cell := range cells { + if len(cell.Vtgates) == 0 { + log.Infof("Starting vtgate") + vc.StartVtgate(t, cell, cellsToWatch) + } + } + require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) if schema != "" { if err := vc.VtctlClient.ApplySchema(ksName, schema); err != nil { t.Fatalf(err.Error()) @@ -444,12 +510,6 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, } } keyspace.VSchema = vschema - for _, cell := range cells { - if len(cell.Vtgates) == 0 { - log.Infof("Starting vtgate") - vc.StartVtgate(t, cell, cellsToWatch) - } - } err = vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", ksName) require.NoError(t, err) @@ -525,11 +585,11 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } } - arrNames := strings.Split(names, ",") - log.Infof("Addshards got %d shards with %+v", len(arrNames), arrNames) - isSharded := len(arrNames) > 1 + shardNames := strings.Split(names, ",") + log.Infof("Addshards got %d shards with %+v", len(shardNames), shardNames) + isSharded := len(shardNames) > 1 primaryTabletUID := 0 - for ind, shardName := range arrNames { + for ind, shardName := range shardNames { tabletID := tabletIDBase + ind*100 tabletIndex := 0 shard := &Shard{Name: shardName, IsSharded: isSharded, Tablets: make(map[string]*Tablet, 1)} @@ -627,6 +687,12 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa if err := tablet.Vttablet.Setup(); err != nil { t.Fatalf(err.Error()) } + // Set time_zone to UTC for all tablets. Without this it fails locally on some MacOS setups. 
+ query := "SET GLOBAL time_zone = '+00:00';" + qr, err := tablet.Vttablet.QueryTablet(query, tablet.Vttablet.Keyspace, false) + if err != nil { + t.Fatalf("failed to set time_zone: %v, output: %v", err, qr) + } } } require.NotEqual(t, 0, primaryTabletUID, "Should have created a primary tablet") @@ -635,12 +701,45 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa log.Infof("Finished creating shard %s", shard.Name) } + for _, shard := range shardNames { + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, keyspace.Name, shard)) + } + + waitTimeout := 30 * time.Second + vtgate := cells[0].Vtgates[0] + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] + numReplicas, numRDOnly := 0, 0 + for _, tablet := range shard.Tablets { + switch strings.ToLower(tablet.Vttablet.TabletType) { + case "replica": + numReplicas++ + case "rdonly": + numRDOnly++ + } + } + numReplicas-- // account for primary, which also has replica type + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, waitTimeout); err != nil { + return err + } + if numReplicas > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), numReplicas, waitTimeout); err != nil { + return err + } + } + if numRDOnly > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), numRDOnly, waitTimeout); err != nil { + return err + } + } + } err := vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", keyspace.Name) require.NoError(t, err) log.Infof("Waiting for throttler config to be applied on all shards") - for _, shard := range keyspace.Shards { + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] for _, tablet := range shard.Tablets { clusterTablet := &cluster.Vttablet{ Alias: tablet.Name, @@ -762,7 +861,7 @@ func (vc *VitessCluster) teardown() { } // TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown(t *testing.T) { +func (vc *VitessCluster) TearDown() { if debugMode { return } @@ -779,7 +878,7 @@ func (vc *VitessCluster) TearDown(t *testing.T) { } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) - vc.CleanupDataroot(t, false) + vc.CleanupDataroot(vc.t, false) } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { @@ -821,6 +920,13 @@ func (vc *VitessCluster) GetVTGateConn(t *testing.T) *mysql.Conn { return getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) } +func getVTGateConn() (*mysql.Conn, func()) { + vtgateConn := vc.GetVTGateConn(vc.t) + return vtgateConn, func() { + vtgateConn.Close() + } +} + func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing.T), func(t *testing.T)) { conn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) _, err := conn.ExecuteFetch("begin", 1000, false) diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index a06fafb257e..401b99360d8 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -97,27 +97,18 @@ func TestFKExt(t *testing.T) { cellName := fkextConfig.cell cells := []string{cellName} - vc = NewVitessCluster(t, t.Name(), cells, fkextConfig.ClusterConfig) - - require.NotNil(t, vc) - allCellNames = 
cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] + vc = NewVitessCluster(t, &clusterOptions{ + cells: cells, + clusterConfig: fkextConfig.ClusterConfig, + }) + defaultCell := vc.Cells[vc.CellNames[0]] cell := vc.Cells[cellName] - defer vc.TearDown(t) + defer vc.TearDown() sourceKeyspace := fkextConfig.sourceKeyspaceName vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, "0", FKExtSourceVSchema, FKExtSourceSchema, 0, 0, 100, nil) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, "0"), 1, shardStatusWaitTimeout)) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) lg = &SimpleLoadGenerator{} @@ -160,7 +151,6 @@ func TestFKExt(t *testing.T) { require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, threeShards, numReplicas, 0, tabletID, nil)) tablets := make(map[string]*cluster.VttabletProcess) for i, shard := range strings.Split(threeShards, ",") { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), numReplicas, shardStatusWaitTimeout)) tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID+i*100)].Vttablet } sqls := strings.Split(FKExtSourceSchema, "\n") @@ -176,7 +166,6 @@ func TestFKExt(t *testing.T) { shard := "0" require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, shard, numReplicas, 0, tabletID, nil)) tablets := make(map[string]*cluster.VttabletProcess) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), numReplicas, shardStatusWaitTimeout)) tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID)].Vttablet sqls := strings.Split(FKExtSourceSchema, "\n") for _, sql := range sqls { @@ -296,7 +285,7 @@ func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards } func areRowCountsEqual(t *testing.T) bool { - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() parentRowCount := getRowCount(t, vtgateConn, "target2.parent") childRowCount := getRowCount(t, vtgateConn, "target2.child") @@ -350,13 +339,9 @@ func moveKeyspace(t *testing.T) { func newKeyspace(t *testing.T, keyspaceName, shards, vschema, schema string, tabletId, numReplicas int) map[string]*cluster.VttabletProcess { tablets := make(map[string]*cluster.VttabletProcess) - cellName := fkextConfig.cell cell := vc.Cells[fkextConfig.cell] + vtgate := cell.Vtgates[0] vc.AddKeyspace(t, []*Cell{cell}, keyspaceName, shards, vschema, schema, numReplicas, 0, tabletId, nil) - for i, shard := range strings.Split(shards, ",") { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, shardStatusWaitTimeout)) - tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletId+i*100)].Vttablet - } err := vc.VtctldClient.ExecuteCommand("RebuildVSchemaGraph") require.NoError(t, err) require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "parent", "id")) diff --git 
a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 7d5f01c13db..a313de09488 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vttablet" @@ -48,35 +47,21 @@ func TestFKWorkflow(t *testing.T) { } defer func() { extraVTTabletArgs = nil }() - cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, "TestFKWorkflow", cells, mainClusterConfig) + cellName := "zone1" + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc) - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "fksource" shardName := "0" - defer vc.TearDown(t) + defer vc.TearDown() cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) + insertInitialFKData(t) var ls *fkLoadSimulator - - insertInitialFKData(t) withLoad := true // Set it to false to skip load simulation, while debugging var cancel context.CancelFunc var ctx context.Context @@ -95,7 +80,6 @@ func TestFKWorkflow(t *testing.T) { targetKeyspace := "fktarget" targetTabletId := 200 vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) workflowName := "fk" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) @@ -142,6 +126,8 @@ func TestFKWorkflow(t *testing.T) { func insertInitialFKData(t *testing.T) { t.Run("insertInitialFKData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sourceKeyspace := "fksource" shard := "0" db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) @@ -277,6 +263,8 @@ func (ls *fkLoadSimulator) delete() { func (ls *fkLoadSimulator) exec(query string) *sqltypes.Result { t := ls.t + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "fksource", query) require.NotNil(t, qr) return qr diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index fc7d66bc732..54d057fe6e9 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -103,6 +103,19 @@ func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { require.NoError(t, err) return qr } +func getConnectionNoError(t *testing.T, hostname string, port int) *mysql.Conn { + vtParams := mysql.ConnParams{ + Host: hostname, + Port: port, + Uname: "vt_dba", + } + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + if err != nil { + return nil + } + return conn +} func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { vtParams := mysql.ConnParams{ @@ -782,8 +795,6 @@ func (lg *loadGenerator) stop() { 
log.Infof("Canceling load") lg.cancel() time.Sleep(loadTestWaitForCancel) // wait for cancel to take effect - log.Flush() - } func (lg *loadGenerator) start() { @@ -907,3 +918,17 @@ func waitForCondition(name string, condition func() bool, timeout time.Duration) } } } + +func getCellNames(cells []*Cell) string { + var cellNames []string + if cells == nil { + cells = []*Cell{} + for _, cell := range vc.Cells { + cells = append(cells, cell) + } + } + for _, cell := range cells { + cellNames = append(cellNames, cell.Name) + } + return strings.Join(cellNames, ",") +} diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index bf93a040942..23f699563e2 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -27,6 +27,8 @@ import ( func insertInitialData(t *testing.T) { t.Run("insertInitialData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") execMultipleQueries(t, vtgateConn, "product:0", string(lines)) @@ -48,6 +50,8 @@ const NumJSONRows = 100 func insertJSONValues(t *testing.T) { // insert null value combinations + vtgateConn, closeConn := getVTGateConn() + defer closeConn() execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") @@ -76,6 +80,8 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { // the number of customer records we are going to // create. The value we get back is the max value // that we reserved. + vtgateConn, closeConn := getVTGateConn() + defer closeConn() maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. 
@@ -95,16 +101,22 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } func insertMoreProducts(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } @@ -122,6 +134,8 @@ var blobTableQueries = []string{ } func insertIntoBlobTable(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, query := range blobTableQueries { execVtgateQuery(t, vtgateConn, "product:0", query) } diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index 63205a56c0a..486692a58ba 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -20,8 +20,6 @@ import ( "testing" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) const smSchema = ` @@ -63,31 +61,21 @@ const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters func testShardedMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig) + var err error + vc = NewVitessCluster(t, nil) ks1 := "ks1" ks2 := "ks2" - shard := "0" require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) - - defaultCell = vc.Cells[defaultCellName] + defer vc.TearDown() + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, ks1, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, ks2, shard) - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false) @@ -182,10 +170,8 @@ RETURN id * length(val); ` func testMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMaterialize", allCells, mainClusterConfig) + var err error + vc = NewVitessCluster(t, nil) sourceKs := "source" targetKs := "target" shard := "0" @@ -193,20 +179,14 @@ func 
testMaterialize(t *testing.T, useVtctldClient bool) { defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 75ab6a3151b..5d927054000 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -25,8 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -48,43 +46,36 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { // hence the VTDATAROOT env variable gets overwritten. // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctlMigrate(t *testing.T) { - defaultCellName := "zone1" - cells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) + t.Run("VStreamFrom", func(t *testing.T) { + testVStreamFrom(t, vtgate, "product", 2) + }) // create external cluster extCell := "extcell1" - extCells := []string{extCell} - extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{cells: []string{"extcell1"}, clusterConfig: externalClusterConfig}) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", 
initialExternalVSchema, initialExternalSchema, 0, 0, 1000, nil) extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) @@ -175,26 +166,18 @@ func TestVtctlMigrate(t *testing.T) { // hence the VTDATAROOT env variable gets overwritten. // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctldMigrate(t *testing.T) { - defaultCellName := "zone1" - cells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrateVtctld", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate, "failed to get vtgate") - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -202,9 +185,11 @@ func TestVtctldMigrate(t *testing.T) { // create external cluster extCell := "extcell1" extCells := []string{extCell} - extVc := NewVitessCluster(t, t.Name(), extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{ + cells: extCells, + clusterConfig: externalClusterConfig, + }) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", @@ -212,8 +197,6 @@ func TestVtctldMigrate(t *testing.T) { extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 113587a1669..e853022bfd4 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -14,8 +14,7 @@ import ( func TestMoveTablesBuffering(t *testing.T) { defaultRdonly = 1 vc = setupMinimalCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() currentWorkflowType = wrangler.MoveTablesWorkflow setupMinimalCustomerKeyspace(t) @@ -41,5 +40,4 @@ func TestMoveTablesBuffering(t *testing.T) { lg.stop() log.Infof("TestMoveTablesBuffering: done") - log.Flush() } diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index f8dc440b62d..bb354a5ec01 
100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -20,7 +20,6 @@ import ( "fmt" "strings" "testing" - "time" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -75,7 +74,7 @@ type vrepTestCase struct { vtgate *cluster.VtgateProcess } -func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCase { +func initPartialMoveTablesComplexTestCase(t *testing.T) *vrepTestCase { const ( seqVSchema = `{ "sharded": false, @@ -122,7 +121,7 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa ) tc := &vrepTestCase{ t: t, - testName: name, + testName: t.Name(), keyspaces: make(map[string]*keyspace), defaultCellName: "zone1", workflows: make(map[string]*workflow), @@ -169,18 +168,15 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa func (tc *vrepTestCase) teardown() { tc.vtgateConn.Close() - vc.TearDown(tc.t) + vc.TearDown() } func (tc *vrepTestCase) setupCluster() { - cells := []string{"zone1"} - - tc.vc = NewVitessCluster(tc.t, tc.testName, cells, mainClusterConfig) + tc.vc = NewVitessCluster(tc.t, nil) vc = tc.vc // for backward compatibility since vc is used globally in this package require.NotNil(tc.t, tc.vc) tc.setupKeyspaces([]string{"commerce", "seqSrc"}) tc.vtgateConn = getConnection(tc.t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) - vtgateConn = tc.vtgateConn // for backward compatibility since vtgateConn is used globally in this package } func (tc *vrepTestCase) initData() { @@ -211,10 +207,6 @@ func (tc *vrepTestCase) setupKeyspace(ks *keyspace) { tc.vtgate = defaultCell.Vtgates[0] } - for _, shard := range ks.shards { - require.NoError(t, cluster.WaitForHealthyShard(tc.vc.VtctldClient, ks.name, shard)) - require.NoError(t, tc.vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks.name, shard), 1, 30*time.Second)) - } } func (tc *vrepTestCase) newWorkflow(typ, workflowName, fromKeyspace, toKeyspace string, options *workflowOptions) *workflow { @@ -291,7 +283,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { extraVTGateArgs = origExtraVTGateArgs }() - tc := initPartialMoveTablesComplexTestCase(t, "TestPartialMoveTablesComplex") + tc := initPartialMoveTablesComplexTestCase(t) defer tc.teardown() var err error @@ -336,6 +328,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { shard := "80-" var wf80Dash, wfDash80 *workflow currentCustomerCount = getCustomerCount(t, "before customer2.80-") + vtgateConn, closeConn := getVTGateConn() t.Run("Start MoveTables on customer2.80-", func(t *testing.T) { // Now setup the customer2 keyspace so we can do a partial move tables for one of the two shards: 80-. defaultRdonly = 0 @@ -353,16 +346,17 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { }) currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") - log.Flush() // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // Reset any existing vtgate connection state. 
- vtgateConn.Close() - vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + closeConn() + + vtgateConn, closeConn = getVTGateConn() + defer closeConn() t.Run("Confirm routing rules", func(t *testing.T) { // Global routing rules should be in place with everything going to the source keyspace (customer). @@ -378,14 +372,14 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") @@ -419,22 +413,22 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { t.Run("Validate shard and tablet type routing", func(t *testing.T) { // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") @@ -537,6 +531,8 @@ var newCustomerCount = int64(201) var lastCustomerId int64 func getCustomerCount(t *testing.T, msg string) int64 { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select count(*) from customer") require.NotNil(t, qr) count, err := qr.Rows[0][0].ToInt64() @@ -545,6 +541,8 @@ func getCustomerCount(t *testing.T, msg string) int64 { } func confirmLastCustomerIdHasIncreased(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select cid from customer order by cid desc limit 1") require.NotNil(t, qr) currentCustomerId, err := qr.Rows[0][0].ToInt64() 
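The routing checks above, and their twins in partial_movetables_test.go below, all build on one trick: each probe query filters on a column that does not exist (noexistcol), so the query always fails, and vtgate's error text names the target that would have served it, e.g. target: customer.80-.primary. A hypothetical helper, not part of this patch, makes the idiom explicit:

    // assertRoutedTo is a hypothetical helper (it does not exist in this
    // patch): it runs a query that is guaranteed to error and asserts on the
    // keyspace/shard/tablet-type target named in vtgate's error message.
    func assertRoutedTo(t *testing.T, conn *mysql.Conn, query, wantTarget string) {
        t.Helper()
        _, err := conn.ExecuteFetch(query, 0, false)
        require.Error(t, err)
        require.Contains(t, err.Error(), wantTarget, "query was routed to the wrong target")
    }

With it, each check above collapses to a call such as assertRoutedTo(t, vtgateConn, shard80DashRoutedQuery, "target: customer.80-.primary"). The rename from shard80MinusRoutedQuery to shard80DashRoutedQuery is purely cosmetic: the shard name is literally "80-", so "Dash" describes it more accurately than "Minus".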
@@ -554,6 +552,8 @@ func confirmLastCustomerIdHasIncreased(t *testing.T) { } func insertCustomers(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for i := int64(1); i < newCustomerCount+1; i++ { execVtgateQuery(t, vtgateConn, "customer@primary", fmt.Sprintf("insert into customer(name) values ('name-%d')", currentCustomerCount+i)) } diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index d9573b50e4a..877df230ce6 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -103,8 +103,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { extraVTGateArgs = origExtraVTGateArgs }() vc = setupMinimalCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() setupMinimalCustomerKeyspace(t) // Move customer table from unsharded product keyspace to @@ -159,6 +158,8 @@ func TestPartialMoveTablesBasic(t *testing.T) { catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2") vdiffSideBySide(t, ksWf, "") + vtgateConn, closeConn := getVTGateConn() + defer closeConn() waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- @@ -182,9 +183,9 @@ func TestPartialMoveTablesBasic(t *testing.T) { } // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // reset any existing vtgate connection state vtgateConn.Close() @@ -205,14 +206,14 @@ func TestPartialMoveTablesBasic(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") @@ -236,40 +237,40 @@ func TestPartialMoveTablesBasic(t *testing.T) { defer vtgateConn.Close() // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = 
vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") // Tablet type targeting _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index 9e0ae797e72..6940665c842 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -23,8 +23,6 @@ import ( "time" "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/require" ) func TestReplicationStress(t *testing.T) { @@ -50,23 +48,13 @@ create table customer(cid int, name varbinary(128), meta json default null, typ const sourceKs = "stress_src" const targetKs = "stress_tgt" - allCells := []string{defaultCellName} - allCellNames = defaultCellName - - vc = NewVitessCluster(t, "TestReplicationStress", allCells, mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() - defer vc.TearDown(t) - - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, 
err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 401147a3887..e44f856c223 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -40,9 +40,7 @@ const ( targetKs = "customer" ksWorkflow = targetKs + "." + workflowName reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" - tablesToMove = "customer" defaultCellName = "zone1" - readQuery = "select cid from customer" ) const ( @@ -84,7 +82,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { - tables = tablesToMove + tables = "customer" } err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) @@ -97,7 +95,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", defaultWorkflowExecOptions) + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, @@ -227,9 +225,12 @@ func validateReadsRoute(t *testing.T, tabletTypes string, tablet *cluster.Vttabl if tabletTypes == "" { tabletTypes = "replica,rdonly" } + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { + readQuery := "select * from customer" assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery) } } @@ -244,6 +245,8 @@ func validateReadsRouteToTarget(t *testing.T, tabletTypes string) { } func validateWritesRouteToSource(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery) @@ -251,6 +254,8 @@ func validateWritesRouteToSource(t *testing.T) { } func validateWritesRouteToTarget(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) @@ -262,7 +267,7 @@ func validateWritesRouteToTarget(t *testing.T) { func revert(t *testing.T, workflowType string) { switchWrites(t, workflowType, ksWorkflow, true) validateWritesRouteToSource(t) - switchReadsNew(t, workflowType, allCellNames, ksWorkflow, true) + switchReadsNew(t, workflowType, getCellNames(nil), ksWorkflow, true) validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup @@ -296,8 +301,7 @@ func 
TestBasicV2Workflows(t *testing.T) { }() vc = setupCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() // Internal tables like the lifecycle ones for OnlineDDL should be ignored ddlSQL := "ALTER TABLE customer MODIFY cid bigint UNSIGNED" @@ -305,7 +309,6 @@ func TestBasicV2Workflows(t *testing.T) { testMoveTablesV2Workflow(t) testReshardV2Workflow(t) - log.Flush() } func getVtctldGRPCURL() string { @@ -342,6 +345,8 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) + vtgateConn, closeConn := getVTGateConn() + defer closeConn() // sanity check output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "product") require.NoError(t, err) @@ -406,6 +411,8 @@ func testReplicatingWithPKEnumCols(t *testing.T) { // when we re-insert the same row values and ultimately VDiff shows the table as // being identical in both keyspaces. + vtgateConn, closeConn := getVTGateConn() + defer closeConn() // typ is an enum, with soho having a stored and binlogged value of 2 deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" @@ -418,6 +425,8 @@ } func testReshardV2Workflow(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() currentWorkflowType = wrangler.ReshardWorkflow // create internal tables on the original customer shards that should be @@ -445,6 +454,8 @@ } func testMoveTablesV2Workflow(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() currentWorkflowType = wrangler.MoveTablesWorkflow // test basic forward and reverse flows @@ -608,30 +619,17 @@ func testRestOfWorkflow(t *testing.T) { } func setupCluster(t *testing.T) *VitessCluster { - cells := []string{"zone1", "zone2"} - - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] + vc = NewVitessCluster(t, &clusterOptions{cells: []string{"zone1", "zone2"}}) zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] vc.AddKeyspace(t, []*Cell{zone1, zone2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second)) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) verifyClusterHealth(t, vc) insertInitialData(t) - + defaultCell := vc.Cells[vc.CellNames[0]] sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-102"].Vttablet @@ -644,12 +642,7 @@ func setupCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema,
defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -664,33 +657,17 @@ func setupCustomer2Keyspace(t *testing.T) { customerVSchema, customerSchema, 0, 0, 1200, nil); err != nil { t.Fatal(err) } - for _, c2shard := range c2shards { - err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard) - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second)) - } } func setupMinimalCluster(t *testing.T) *VitessCluster { - cells := []string{"zone1"} + vc = NewVitessCluster(t, nil) - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] zone1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, 0, 0, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) verifyClusterHealth(t, vc) insertInitialData(t) @@ -704,10 +681,7 @@ func setupMinimalCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema, 0, 0, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -715,7 +689,7 @@ func setupMinimalCustomerKeyspace(t *testing.T) { func TestSwitchReadsWritesInAnyOrder(t *testing.T) { vc = setupCluster(t) - defer vc.TearDown(t) + defer vc.TearDown() moveCustomerTableSwitchFlows(t, []*Cell{vc.Cells["zone1"]}, "zone1") } @@ -747,7 +721,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias catchup(t, targetTab2, workflow, workflowType) vdiffSideBySide(t, 
ksWorkflow, "") } - + allCellNames := getCellNames(cells) var switchReadsFollowedBySwitchWrites = func() { moveTablesAndWait() @@ -827,16 +801,9 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias func createAdditionalCustomerShards(t *testing.T, shards string) { ksName := "customer" + defaultCell := vc.Cells[vc.CellNames[0]] keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - arrTargetShardNames := strings.Split(shards, ",") - - for _, shardName := range arrTargetShardNames { - err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second)) - } custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index ef05e051be2..cea74626659 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -58,15 +58,8 @@ func prs(t *testing.T, keyspace, shard string) { // TestSidecarDB launches a Vitess cluster and ensures that the expected sidecar tables are created. We also drop/alter // tables and ensure the next tablet init will recreate the sidecar database to the desired schema. func TestSidecarDB(t *testing.T) { - cells := []string{"zone1"} - - vc = NewVitessCluster(t, "TestSidecarDB", cells, mainClusterConfig) - require.NotNil(t, vc) - allCellNames = "zone1" - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() keyspace := "product" shard := "0" @@ -74,7 +67,7 @@ func TestSidecarDB(t *testing.T) { cell1 := vc.Cells[defaultCellName] tablet100 := fmt.Sprintf("%s-100", defaultCellName) tablet101 := fmt.Sprintf("%s-101", defaultCellName) - vc.AddKeyspace(t, []*Cell{cell1}, keyspace, shard, initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard] tablet100Port := shard0.Tablets[tablet100].Vttablet.Port tablet101Port := shard0.Tablets[tablet101].Vttablet.Port diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2d0d1eeaf0b..ff334c593fe 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -32,31 +32,21 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { - allCellNames = "zone1" - defaultCellName := "zone1" workflow := "tz" sourceKs := "product" targetKs := "customer" - shard := "0" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) - vc = NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", []string{"zone1"}, mainClusterConfig) - 
require.NotNil(t, vc) - defaultCell = vc.Cells[defaultCellName] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[vc.CellNames[0]] cells := []*Cell{defaultCell} - defer vc.TearDown(t) - cell1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -90,10 +80,6 @@ func TestMoveTablesTZ(t *testing.T) { if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) - - defaultCell := vc.Cells["zone1"] custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index d7b8cb6a47e..e719911a63b 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -29,7 +29,6 @@ import ( "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/sqlparser" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -112,7 +111,7 @@ var testCases = []*testCase{ } func TestVDiff2(t *testing.T) { - allCellNames = "zone5,zone1,zone2,zone3,zone4" + cellNames := "zone5,zone1,zone2,zone3,zone4" sourceKs := "product" sourceShards := []string{"0"} targetKs := "customer" @@ -120,27 +119,19 @@ func TestVDiff2(t *testing.T) { // This forces us to use multiple vstream packets even with small test tables. extraVTTabletArgs = []string{"--vstream_packet_size=1"} - vc = NewVitessCluster(t, "TestVDiff2", strings.Split(allCellNames, ","), mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, &clusterOptions{cells: strings.Split(cellNames, ",")}) + defer vc.TearDown() + zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] zone3 := vc.Cells["zone3"] - defaultCell = zone1 - - defer vc.TearDown(t) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) require.NoError(t, err) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - for _, shard := range sourceShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)) - } - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -153,16 +144,13 @@ func TestVDiff2(t *testing.T) { generateMoreCustomers(t, sourceKs, 1000) // Create rows in the nopk table using the customer names and random ages between 20 and 100. 
- _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s.nopk(name, age) select name, floor(rand()*80)+20 from %s.customer", sourceKs, sourceKs), -1, false) - require.NoError(t, err, "failed to insert rows into nopk table: %v", err) + query = "insert into nopk(name, age) select name, floor(rand()*80)+20 from customer" + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) require.NoError(t, err) - for _, shard := range targetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)) - } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -176,9 +164,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) - for _, shard := range arrTargetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, tc.targetKs, shard)) - } + } ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) var args []string @@ -187,6 +173,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, if tc.typ == "Reshard" { args = append(args, "--source_shards", tc.sourceShards, "--target_shards", tc.targetShards) } + allCellNames := getCellNames(nil) args = append(args, "--cells", allCellNames) args = append(args, "--tables", tc.tables) args = append(args, "Create") @@ -379,6 +366,8 @@ func testNoOrphanedData(t *testing.T, keyspace, workflow string, shards []string func testResume(t *testing.T, tc *testCase, cells string) { t.Run("Resume", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state @@ -422,6 +411,8 @@ func testStop(t *testing.T, ksWorkflow, cells string) { func testAutoRetryError(t *testing.T, tc *testCase, cells string) { t.Run("Auto retry on error", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 7dbc675886b..88e462d2eaa 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -34,7 +34,7 @@ import ( const ( vdiffTimeout = 90 * time.Second // we can leverage auto retry on error with this longer-than-usual timeout vdiffRetryTimeout = 30 * time.Second - vdiffStatusCheckInterval = 1 * time.Second + vdiffStatusCheckInterval = 5 * time.Second vdiffRetryInterval = 5 * time.Second ) @@ -341,6 +341,8 @@ func encodeString(in string) string { // generateMoreCustomers creates additional test data for better tests // when needed. 
func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() log.Infof("Generating more test data with an additional %d customers", numCustomers) res := execVtgateQuery(t, vtgateConn, keyspace, "select max(cid) from customer") startingID, _ := res.Rows[0][0].ToInt64() diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 434ea6db3e0..a4c25941801 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -27,43 +27,26 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestMultipleConcurrentVDiffs(t *testing.T) { - cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, t.Name(), cells, mainClusterConfig) - - require.NotNil(t, vc) - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] + cellName := "zone1" + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + sourceKeyspace := "product" shardName := "0" - defer vc.TearDown(t) - cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) - insertInitialData(t) targetTabletId := 200 targetKeyspace := "customer" vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) index := 1000 var loadCtx context.Context diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index abbdfe7f4dd..4e50ea12af3 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -39,7 +39,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vtgateconn" - "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" @@ -52,12 +51,8 @@ import ( var ( vc *VitessCluster - vtgate *cluster.VtgateProcess - defaultCell *Cell - vtgateConn *mysql.Conn defaultRdonly int defaultReplicas int - allCellNames string sourceKsOpts = make(map[string]string) targetKsOpts = make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) @@ -122,15 +117,16 @@ func throttlerCheckSelf(tablet *cluster.VttabletProcess, throttlerApp throttlera // NOTE: this is a manual test. It is not executed in the // CI. 
func TestVReplicationDDLHandling(t *testing.T) { + var err error workflow := "onddl_test" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) table := "orders" newColumn := "ddltest" cell := "zone1" shard := "0" - vc = NewVitessCluster(t, t.Name(), []string{cell}, mainClusterConfig) - defer vc.TearDown(t) - defaultCell = vc.Cells[cell] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[cell] if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) @@ -138,15 +134,12 @@ func TestVReplicationDDLHandling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) + verifyClusterHealth(t, vc) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) targetTab := vc.getPrimaryTablet(t, targetKs, shard) @@ -234,9 +227,9 @@ func TestVreplicationCopyThrottling(t *testing.T) { cell := "zone1" table := "customer" shard := "0" - vc = NewVitessCluster(t, "TestVreplicationCopyThrottling", []string{cell}, mainClusterConfig) - defer vc.TearDown(t) - defaultCell = vc.Cells[cell] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[cell] // To test vstreamer source throttling for the MoveTables operation maxSourceTrxHistory := int64(5) extraVTTabletArgs = []string{ @@ -253,12 +246,8 @@ func TestVreplicationCopyThrottling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) // Confirm that the initial copy table phase does not proceed until the source tablet(s) // have an InnoDB History List length that is less than specified in the tablet's config. @@ -280,11 +269,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { } func TestBasicVreplicationWorkflow(t *testing.T) { - ogflags := extraVTTabletArgs - defer func() { extraVTTabletArgs = ogflags }() - // Test VPlayer batching mode. - extraVTTabletArgs = append(extraVTTabletArgs, fmt.Sprintf("--vreplication_experimental_flags=%d", - vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching)) + defer setAllVTTabletExperimentalFlags()() sourceKsOpts["DBTypeVersion"] = "mysql-8.0" targetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") @@ -305,12 +290,10 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows.
func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { + var err error defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) - - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() // Keep the cluster processes minimal to deal with CI resource constraints defaultReplicas = 0 defaultRdonly = 0 @@ -320,16 +303,11 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) } - defer vc.TearDown(t) - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -428,49 +406,14 @@ func TestMoveTablesMariaDBToMySQL(t *testing.T) { testVreplicationWorkflows(t, true /* only do MoveTables */, "") } -func TestMultiCellVreplicationWorkflow(t *testing.T) { - cells := []string{"zone1", "zone2"} - allCellNames = strings.Join(cells, ",") - - vc = NewVitessCluster(t, "TestMultiCellVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - keyspace := "product" - shard := "0" - - defer vc.TearDown(t) - - cell1 := vc.Cells["zone1"] - cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) - insertInitialData(t) - shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name, true) - isTableInDenyList(t, vc, "product:0", "customer") - // we tag along this test so as not to create the overhead of creating another cluster - testVStreamCellFlag(t) -} - func TestVStreamFlushBinlog(t *testing.T) { defaultCellName := "zone1" - allCells := []string{defaultCellName} - allCellNames = defaultCellName workflow := "test_vstream_p2c" shard := "0" - vc = NewVitessCluster(t, "TestVStreamBinlogFlush", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) - defer vc.TearDown(t) - defaultCell = vc.Cells[defaultCellName] + defer vc.TearDown() + defaultCell := vc.Cells[defaultCellName] // Keep the cluster processes minimal (no rdonly and no replica tablets) // to deal with CI resource constraints. 
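Every cluster in this patch is now built either with NewVitessCluster(t, nil) or with an explicit *clusterOptions, and torn down with the argument-free vc.TearDown(). clusterOptions is defined in cluster_test.go, which this excerpt does not show. The two fields exercised here, cells and clusterConfig, plus the behavior of the nil call sites, suggest a shape like the sketch below; the field names come from the diff, while the defaulting helper and its default values are assumptions.

    // Sketch of the options implied by call sites such as
    // NewVitessCluster(t, &clusterOptions{cells: extCells, clusterConfig: externalClusterConfig}).
    type clusterOptions struct {
        cells         []string
        clusterConfig *ClusterConfig
    }

    // applyClusterOptions is hypothetical, but passing nil must yield a
    // single-cell "zone1" cluster on mainClusterConfig, because tests such as
    // TestFKWorkflow index vc.Cells["zone1"] right after NewVitessCluster(t, nil).
    func applyClusterOptions(opts *clusterOptions) *clusterOptions {
        if opts == nil {
            opts = &clusterOptions{}
        }
        if len(opts.cells) == 0 {
            opts.cells = []string{"zone1"}
        }
        if opts.clusterConfig == nil {
            opts.clusterConfig = mainClusterConfig
        }
        return opts
    }

The companion vc.CellNames slice appears to record the configured cell names in order, which is why vc.Cells[vc.CellNames[0]] can stand in for the deleted defaultCell global throughout the patch.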
@@ -482,16 +425,8 @@ func TestVStreamFlushBinlog(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) verifyClusterHealth(t, vc) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) insertInitialData(t) @@ -507,7 +442,9 @@ func TestVStreamFlushBinlog(t *testing.T) { // Generate a lot of binlog event bytes targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 1024 - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', '%s', now())" for i := 100; i < 10000; i++ { randStr, err := randHex(6500) @@ -582,7 +519,7 @@ func testVStreamCellFlag(t *testing.T) { flags.CellPreference = "onlyspecified" } - ctx2, cancel := context.WithTimeout(ctx, 30*time.Second) + ctx2, cancel := context.WithTimeout(ctx, 10*time.Second) reader, err := conn.VStream(ctx2, topodatapb.TabletType_REPLICA, vgtid, filter, flags) require.NoError(t, err) @@ -626,28 +563,16 @@ func testVStreamCellFlag(t *testing.T) { // We also reuse the setup of this test to validate that the "vstream * from" vtgate query functionality is functional func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - mainClusterConfig.vreplicationCompressGTID = true - oldVTTabletExtraArgs := extraVTTabletArgs - extraVTTabletArgs = append(extraVTTabletArgs, - // Test VPlayer batching mode. 
-        fmt.Sprintf("--vreplication_experimental_flags=%d",
-            vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching),
-    )
-    defer func() {
-        mainClusterConfig.vreplicationCompressGTID = false
-        extraVTTabletArgs = oldVTTabletExtraArgs
-    }()
-    vc = NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", cells, mainClusterConfig)
-    require.NotNil(t, vc)
-    allCellNames = "zone1,zone2"
-    defaultCellName := "zone1"
-    defaultCell = vc.Cells[defaultCellName]
+    defer mainClusterConfig.enableGTIDCompression()()
+    defer setAllVTTabletExperimentalFlags()
+    vc = NewVitessCluster(t, &clusterOptions{cells: cells})
+    defer vc.TearDown()
+
     keyspace := "product"
     shard := "0"
     require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir))
     defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir)
-    defer vc.TearDown(t)
 
     cell1 := vc.Cells["zone1"]
     cell2 := vc.Cells["zone2"]
@@ -657,26 +582,21 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
     result, err := vc.VtctlClient.ExecuteCommandWithOutput("AddCellsAlias", "--", "--cells", "zone2", "alias")
     require.NoError(t, err, "command failed with output: %v", result)
 
-    vtgate = cell1.Vtgates[0]
-    require.NotNil(t, vtgate)
-    err = cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard)
-    require.NoError(t, err)
-    vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second)
-
-    vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
-    defer vtgateConn.Close()
     verifyClusterHealth(t, vc)
-    insertInitialData(t)
+    vtgate := cell1.Vtgates[0]
     t.Run("VStreamFrom", func(t *testing.T) {
-        testVStreamFrom(t, keyspace, 2)
+        testVStreamFrom(t, vtgate, keyspace, 2)
     })
     shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false)
+    isTableInDenyList(t, vc, "product:0", "customer")
+    // we tag along this test so as not to create the overhead of creating another cluster
+    testVStreamCellFlag(t)
 }
 
 // testVStreamFrom confirms that the "vstream * from" endpoint is serving data
-func testVStreamFrom(t *testing.T, table string, expectedRowCount int) {
+func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, expectedRowCount int) {
     ctx := context.Background()
     vtParams := mysql.ConnParams{
         Host: "localhost",
@@ -748,11 +668,6 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
     if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil {
         t.Fatal(err)
     }
-    err := cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "-80")
-    require.NoError(t, err)
-    err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "80-")
-    require.NoError(t, err)
-
     // Assume we are operating on first cell
     defaultCell := cells[0]
     custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
@@ -772,7 +687,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
     // The wait in the next code block which checks that customer.dec80 is updated, also confirms that the
     // blob-related dmls we execute here are vreplicated.
     insertIntoBlobTable(t)
-
+    vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+    defer vtgateConn.Close()
     // Confirm that the 0 scale decimal field, dec80, is replicated correctly
     dec80Replicated := false
     execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0")
@@ -841,8 +757,9 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
         }
     }
     vdiffSideBySide(t, ksWorkflow, "")
-    switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard)
-    switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+    cellNames := getCellNames(cells)
+    switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard)
+    switchReads(t, workflowType, cellNames, ksWorkflow, false)
     assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)
 
     var commit func(t *testing.T)
@@ -886,7 +803,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
     execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1")
     if testReverse {
         // Reverse Replicate
-        switchReads(t, workflowType, allCellNames, ksWorkflow, true)
+        switchReads(t, workflowType, cellNames, ksWorkflow, true)
         printShardPositions(vc, ksShards)
         switchWrites(t, workflowType, ksWorkflow, true)
 
@@ -906,7 +823,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
         waitForNoWorkflowLag(t, vc, targetKs, workflow)
 
         // Go forward again
-        switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+        switchReads(t, workflowType, cellNames, ksWorkflow, false)
         switchWrites(t, workflowType, ksWorkflow, false)
 
         var exists bool
@@ -914,7 +831,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
         require.NoError(t, err, "Error getting denylist for customer:0")
         require.True(t, exists)
 
-        moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables)
+        moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables)
 
         exists, err = isTableInDenyList(t, vc, "product:0", "customer")
         require.NoError(t, err, "Error getting denylist for customer:0")
@@ -959,6 +876,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
 func validateRollupReplicates(t *testing.T) {
     t.Run("validateRollupReplicates", func(t *testing.T) {
         insertMoreProducts(t)
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         waitForRowCount(t, vtgateConn, "product", "rollup", 1)
         waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup",
             `[[VARCHAR("total") INT32(5)]]`)
@@ -967,6 +886,8 @@ func validateRollupReplicates(t *testing.T) {
 
 func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias string) {
     t.Run("reshardCustomer2to4Split", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         ksName := "customer"
         counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5}
         reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-",
@@ -980,6 +901,8 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str
 
 func reshardMerchant2to3SplitMerge(t *testing.T) {
     t.Run("reshardMerchant2to3SplitMerge", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         ksName := merchantKeyspace
         counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0}
         reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-",
@@ -1027,6 +950,8 @@ func reshardMerchant2to3SplitMerge(t *testing.T) {
 
 func reshardMerchant3to1Merge(t *testing.T) {
     t.Run("reshardMerchant3to1Merge", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         ksName := merchantKeyspace
         counts := map[string]int{"zone1-2000": 3}
         reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0",
@@ -1060,21 +985,18 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
     tabletIDBase int, counts map[string]int, dryRunResultSwitchReads, dryRunResultSwitchWrites []string,
     cells []*Cell, sourceCellOrAlias string, autoIncrementStep int) {
     t.Run("reshard", func(t *testing.T) {
+        defaultCell := vc.Cells[vc.CellNames[0]]
         if cells == nil {
             cells = []*Cell{defaultCell}
         }
         if sourceCellOrAlias == "" {
             sourceCellOrAlias = defaultCell.Name
         }
+        cellNames := getCellNames(cells)
         ksWorkflow := ksName + "." + workflow
         keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName]
         require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts))
-        arrTargetShardNames := strings.Split(targetShards, ",")
-        for _, shardName := range arrTargetShardNames {
-            err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName)
-            require.NoError(t, err)
-        }
         tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary")
 
         // Test multi-primary setups, like a Galera cluster, which have auto increment steps > 1.
@@ -1098,13 +1020,13 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
         restartWorkflow(t, ksWorkflow)
         vdiffSideBySide(t, ksWorkflow, "")
         if dryRunResultSwitchReads != nil {
-            reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica", "--dry-run")
+            reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", cellNames, "rdonly,replica", "--dry-run")
         }
-        reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica")
+        reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", cellNames, "rdonly,replica")
         if dryRunResultSwitchWrites != nil {
-            reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary", "--dry-run")
+            reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", cellNames, "primary", "--dry-run")
         }
-        reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary")
+        reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", cellNames, "primary")
         reshardAction(t, "Complete", workflow, ksName, "", "", "", "")
         for tabletName, count := range counts {
             if tablets[tabletName] == nil {
@@ -1117,6 +1039,9 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
 
 func shardOrders(t *testing.T) {
     t.Run("shardOrders", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
+        defaultCell := vc.Cells[vc.CellNames[0]]
         workflow := "o2c"
         cell := defaultCell.Name
         sourceKs := "product"
@@ -1133,7 +1058,7 @@ func shardOrders(t *testing.T) {
         catchup(t, customerTab1, workflow, workflowType)
         catchup(t, customerTab2, workflow, workflowType)
         vdiffSideBySide(t, ksWorkflow, "")
-        switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+        switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false)
         switchWrites(t, workflowType, ksWorkflow, false)
         moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables)
         waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1)
@@ -1159,7 +1084,10 @@ func checkThatVDiffFails(t *testing.T, keyspace, workflow string) {
 
 func shardMerchant(t *testing.T) {
     t.Run("shardMerchant", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         workflow := "p2m"
+        defaultCell := vc.Cells[vc.CellNames[0]]
         cell := defaultCell.Name
         sourceKs := "product"
         targetKs := merchantKeyspace
@@ -1168,10 +1096,6 @@ func shardMerchant(t *testing.T) {
         if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil {
             t.Fatal(err)
         }
-        err := cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "-80")
-        require.NoError(t, err)
-        err = cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "80-")
-        require.NoError(t, err)
         moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables)
         merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace]
         merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet
@@ -1181,7 +1105,7 @@ func shardMerchant(t *testing.T) {
         catchup(t, merchantTab2, workflow, workflowType)
 
         vdiffSideBySide(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "")
-        switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+        switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false)
         switchWrites(t, workflowType, ksWorkflow, false)
 
         printRoutingRules(t, vc, "After merchant movetables")
@@ -1234,6 +1158,7 @@ func materializeProduct(t *testing.T, useVtctldClient bool) {
         // materializing from "product" keyspace to "customer" keyspace
         workflow := "cproduct"
         keyspace := "customer"
+        defaultCell := vc.Cells[vc.CellNames[0]]
         applyVSchema(t, materializeProductVSchema, keyspace)
         materialize(t, materializeProductSpec, useVtctldClient)
         customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "primary")
@@ -1312,9 +1237,12 @@ func materializeProduct(t *testing.T, useVtctldClient bool) {
 
 func materializeRollup(t *testing.T, useVtctldClient bool) {
     t.Run("materializeRollup", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         keyspace := "product"
         workflow := "rollup"
         applyVSchema(t, materializeSalesVSchema, keyspace)
+        defaultCell := vc.Cells[vc.CellNames[0]]
         productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet
         materialize(t, materializeRollupSpec, useVtctldClient)
         catchup(t, productTab, workflow, "Materialize")
@@ -1326,9 +1254,12 @@ func materializeRollup(t *testing.T, useVtctldClient bool) {
 
 func materializeSales(t *testing.T, useVtctldClient bool) {
     t.Run("materializeSales", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         keyspace := "product"
         applyVSchema(t, materializeSalesVSchema, keyspace)
         materialize(t, materializeSalesSpec, useVtctldClient)
+        defaultCell := vc.Cells[vc.CellNames[0]]
         productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet
         catchup(t, productTab, "sales", "Materialize")
         waitForRowCount(t, vtgateConn, "product", "sales", 2)
@@ -1339,8 +1270,11 @@ func materializeSales(t *testing.T, useVtctldClient bool) {
 
 func materializeMerchantSales(t *testing.T, useVtctldClient bool) {
     t.Run("materializeMerchantSales", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         workflow := "msales"
         materialize(t, materializeMerchantSalesSpec, useVtctldClient)
+        defaultCell := vc.Cells[vc.CellNames[0]]
         merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary")
         for _, tab := range merchantTablets {
             catchup(t, tab, workflow, "Materialize")
@@ -1353,10 +1287,13 @@ func materializeMerchantSales(t *testing.T, useVtctldClient bool) {
 
 func materializeMerchantOrders(t *testing.T, useVtctldClient bool) {
     t.Run("materializeMerchantOrders", func(t *testing.T) {
+        vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+        defer vtgateConn.Close()
         workflow := "morders"
         keyspace := merchantKeyspace
         applyVSchema(t, merchantOrdersVSchema, keyspace)
         materialize(t, materializeMerchantOrdersSpec, useVtctldClient)
+        defaultCell := vc.Cells[vc.CellNames[0]]
         merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary")
         for _, tab := range merchantTablets {
             catchup(t, tab, workflow, "Materialize")
@@ -1617,6 +1554,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) {
         log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow)
         ksShards := []string{"product/0", "customer/-80", "customer/80-"}
         printShardPositions(vc, ksShards)
+        defaultCell := vc.Cells[vc.CellNames[0]]
         custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
         customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
         customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go
index a5cac4c68f8..6ca8dcfe472 100644
--- a/go/test/endtoend/vreplication/vschema_load_test.go
+++ b/go/test/endtoend/vreplication/vschema_load_test.go
@@ -26,7 +26,6 @@ import (
 
     "github.com/stretchr/testify/require"
 
-    "vitess.io/vitess/go/test/endtoend/cluster"
     "vitess.io/vitess/go/vt/log"
     binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
     topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -40,23 +39,13 @@ func TestVSchemaChangesUnderLoad(t *testing.T) {
 
     extendedTimeout := defaultTimeout * 4
 
-    defaultCellName := "zone1"
-    allCells := []string{"zone1"}
-    allCellNames = "zone1"
-    vc = NewVitessCluster(t, "TestVSchemaChanges", allCells, mainClusterConfig)
+    vc = NewVitessCluster(t, nil)
+    defer vc.TearDown()
 
-    require.NotNil(t, vc)
-
-    defer vc.TearDown(t)
-
-    defaultCell = vc.Cells[defaultCellName]
+    defaultCell := vc.Cells[vc.CellNames[0]]
     vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts)
-    vtgate = defaultCell.Vtgates[0]
-    require.NotNil(t, vtgate)
-    err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
-    require.NoError(t, err)
-    vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second)
-    vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+
+    vtgateConn := vc.GetVTGateConn(t)
     defer vtgateConn.Close()
 
     // ch is used to signal that there is significant data inserted into the tables and when a lot of vschema changes have been applied
diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go
index 5c5e6a80130..8b21cf6fb60 100644
--- a/go/test/endtoend/vreplication/vstream_test.go
+++ b/go/test/endtoend/vreplication/vstream_test.go
@@ -27,7 +26,6 @@ import (
 
     "github.com/stretchr/testify/require"
 
-    "vitess.io/vitess/go/test/endtoend/cluster"
     "vitess.io/vitess/go/vt/log"
     binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
     topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -43,26 +42,21 @@ import (
 // - We ensure that this works through active reparents and doesn't miss any events
 // - We stream only from the primary and while streaming we reparent to a replica and then back to the original primary
 func testVStreamWithFailover(t *testing.T, failover bool) {
-    defaultCellName := "zone1"
-    cells := []string{"zone1"}
-    allCellNames = "zone1"
-    vc = NewVitessCluster(t, "TestVStreamWithFailover", cells, mainClusterConfig)
+    vc = NewVitessCluster(t, nil)
+    defer vc.TearDown()
 
     require.NotNil(t, vc)
     defaultReplicas = 2
     defaultRdonly = 0
-    defer vc.TearDown(t)
 
-    defaultCell = vc.Cells[defaultCellName]
+    defaultCell := vc.Cells[vc.CellNames[0]]
     vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil)
-    vtgate = defaultCell.Vtgates[0]
-    require.NotNil(t, vtgate)
-    vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second)
-    vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
-    defer vtgateConn.Close()
-    verifyClusterHealth(t, vc)
 
     insertInitialData(t)
+    vtgate := defaultCell.Vtgates[0]
+    t.Run("VStreamFrom", func(t *testing.T) {
+        testVStreamFrom(t, vtgate, "product", 2)
+    })
 
     ctx := context.Background()
     vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort))
     if err != nil {
@@ -90,6 +84,9 @@ func testVStreamWithFailover(t *testing.T, failover bool) {
     stopInserting := false
     id := 0
 
+    vtgateConn := vc.GetVTGateConn(t)
+    defer vtgateConn.Close()
+
     // first goroutine that keeps inserting rows into table being streamed until some time elapses after second PRS
     go func() {
         for {
@@ -217,6 +214,11 @@ const vschemaSharded = `
 `
 
 func insertRow(keyspace, table string, id int) {
+    vtgateConn := getConnectionNoError(vc.t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+    // Due to race conditions this call is sometimes made after vtgates have shut down. In that case just return.
+    if vtgateConn == nil {
+        return
+    }
     vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false)
     vtgateConn.ExecuteFetch("begin", 1000, false)
     _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false)
@@ -228,33 +230,25 @@ func insertRow(keyspace, table string, id int) {
 
 type numEvents struct {
     numRowEvents, numJournalEvents int64
-    numLessThan80Events, numGreaterThan80Events int64
-    numLessThan40Events, numGreaterThan40Events int64
+    numDash80Events, num80DashEvents int64
+    numDash40Events, num40DashEvents int64
     numShard0BeforeReshardEvents, numShard0AfterReshardEvents int64
 }
 
 // tests the StopOnReshard flag
 func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID int) *numEvents {
     defaultCellName := "zone1"
-    allCells := []string{"zone1"}
-    allCellNames = "zone1"
-    vc = NewVitessCluster(t, "TestVStreamStopOnReshard", allCells, mainClusterConfig)
+    vc = NewVitessCluster(t, nil)
 
     require.NotNil(t, vc)
     defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets
     defer func() { defaultReplicas = 1 }()
 
-    defer vc.TearDown(t)
+    defer vc.TearDown()
 
-    defaultCell = vc.Cells[defaultCellName]
+    defaultCell := vc.Cells[defaultCellName]
     vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil)
-    vtgate = defaultCell.Vtgates[0]
-    require.NotNil(t, vtgate)
-    err := cluster.WaitForHealthyShard(vc.VtctldClient, "unsharded", "0")
-    require.NoError(t, err)
-    vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
-    defer vtgateConn.Close()
     verifyClusterHealth(t, vc)
 
     // some initial data
@@ -325,13 +319,13 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID
                     shard := ev.RowEvent.Shard
                     switch shard {
                     case "-80":
-                        ne.numLessThan80Events++
+                        ne.numDash80Events++
                     case "80-":
-                        ne.numGreaterThan80Events++
+                        ne.num80DashEvents++
                     case "-40":
-                        ne.numLessThan40Events++
+                        ne.numDash40Events++
                     case "40-":
-                        ne.numGreaterThan40Events++
+                        ne.num40DashEvents++
                     }
                     ne.numRowEvents++
                 case binlogdatapb.VEventType_JOURNAL:
@@ -385,25 +379,17 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID
 
 // Validate that we can continue streaming from multiple keyspaces after first copying some tables and then resharding one of the keyspaces
 // Ensure that there are no missing row events during the resharding process.
 func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents {
-    defaultCellName := "zone1"
-    allCellNames = defaultCellName
-    allCells := []string{allCellNames}
-    vc = NewVitessCluster(t, "VStreamCopyMultiKeyspaceReshard", allCells, mainClusterConfig)
-
-    require.NotNil(t, vc)
+    vc = NewVitessCluster(t, nil)
     ogdr := defaultReplicas
     defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets
     defer func(dr int) { defaultReplicas = dr }(ogdr)
-    defer vc.TearDown(t)
+    defer vc.TearDown()
 
-    defaultCell = vc.Cells[defaultCellName]
+    defaultCell := vc.Cells[vc.CellNames[0]]
     vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil)
-    vtgate = defaultCell.Vtgates[0]
-    require.NotNil(t, vtgate)
-    vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second)
-    vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+    vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
     defer vtgateConn.Close()
     verifyClusterHealth(t, vc)
 
@@ -468,13 +454,13 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven
                         ne.numShard0BeforeReshardEvents++
                     }
                 case "-80":
-                    ne.numLessThan80Events++
+                    ne.numDash80Events++
                 case "80-":
-                    ne.numGreaterThan80Events++
+                    ne.num80DashEvents++
                 case "-40":
-                    ne.numLessThan40Events++
+                    ne.numDash40Events++
                 case "40-":
-                    ne.numGreaterThan40Events++
+                    ne.num40DashEvents++
                 }
                 ne.numRowEvents++
             case binlogdatapb.VEventType_JOURNAL:
@@ -522,7 +508,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven
     customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer")
     insertedCustomerRows, err := customerResult.Rows[0][0].ToCastInt64()
     require.NoError(t, err)
-    require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events)
+    require.Equal(t, insertedCustomerRows, ne.numDash80Events+ne.num80DashEvents+ne.numDash40Events+ne.num40DashEvents)
     return ne
 }
 
@@ -534,20 +520,20 @@ func TestVStreamStopOnReshardTrue(t *testing.T) {
     ne := testVStreamStopOnReshardFlag(t, true, 1000)
     require.Greater(t, ne.numJournalEvents, int64(0))
     require.NotZero(t, ne.numRowEvents)
-    require.NotZero(t, ne.numLessThan80Events)
-    require.NotZero(t, ne.numGreaterThan80Events)
-    require.Zero(t, ne.numLessThan40Events)
-    require.Zero(t, ne.numGreaterThan40Events)
+    require.NotZero(t, ne.numDash80Events)
+    require.NotZero(t, ne.num80DashEvents)
+    require.Zero(t, ne.numDash40Events)
+    require.Zero(t, ne.num40DashEvents)
 }
 
 func TestVStreamStopOnReshardFalse(t *testing.T) {
     ne := testVStreamStopOnReshardFlag(t, false, 2000)
     require.Equal(t, int64(0), ne.numJournalEvents)
     require.NotZero(t, ne.numRowEvents)
-    require.NotZero(t, ne.numLessThan80Events)
-    require.NotZero(t, ne.numGreaterThan80Events)
-    require.NotZero(t, ne.numLessThan40Events)
-    require.NotZero(t, ne.numGreaterThan40Events)
+    require.NotZero(t, ne.numDash80Events)
+    require.NotZero(t, ne.num80DashEvents)
+    require.NotZero(t, ne.numDash40Events)
+    require.NotZero(t, ne.num40DashEvents)
 }
 
 func TestVStreamWithKeyspacesToWatch(t *testing.T) {
@@ -564,8 +550,8 @@ func TestVStreamCopyMultiKeyspaceReshard(t *testing.T) {
     require.NotZero(t, ne.numRowEvents)
     require.NotZero(t, ne.numShard0BeforeReshardEvents)
    require.NotZero(t, ne.numShard0AfterReshardEvents)
-    require.NotZero(t, ne.numLessThan80Events)
-    require.NotZero(t, ne.numGreaterThan80Events)
-    require.NotZero(t, ne.numLessThan40Events)
-    require.NotZero(t, ne.numGreaterThan40Events)
+    require.NotZero(t, ne.numDash80Events)
+    require.NotZero(t, ne.num80DashEvents)
+    require.NotZero(t, ne.numDash40Events)
+    require.NotZero(t, ne.num40DashEvents)
 }
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
index dc11ac7bd9c..04738ee7857 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
@@ -692,8 +692,8 @@ func TestPlayerFilters(t *testing.T) {
         fmt.Sprintf("create table %s.dst4(id1 int, val varbinary(128), primary key(id1))", vrepldb),
         "create table src5(id1 int, id2 int, val varbinary(128), primary key(id1))",
         fmt.Sprintf("create table %s.dst5(id1 int, val varbinary(128), primary key(id1))", vrepldb),
-        "create table srcCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))",
-        fmt.Sprintf("create table %s.dstCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb),
+        "create table src_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))",
+        fmt.Sprintf("create table %s.dst_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb),
     })
     defer execStatements(t, []string{
         "drop table src1",
@@ -711,8 +711,8 @@ func TestPlayerFilters(t *testing.T) {
         fmt.Sprintf("drop table %s.dst4", vrepldb),
         "drop table src5",
         fmt.Sprintf("drop table %s.dst5", vrepldb),
-        "drop table srcCharset",
-        fmt.Sprintf("drop table %s.dstCharset", vrepldb),
+        "drop table src_charset",
+        fmt.Sprintf("drop table %s.dst_charset", vrepldb),
     })
     env.SchemaEngine.Reload(context.Background())
 
@@ -737,8 +737,8 @@ func TestPlayerFilters(t *testing.T) {
             Match:  "dst5",
             Filter: "select id1, val from src5 where val = 'abc'",
         }, {
-            Match:  "dstCharset",
-            Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from srcCharset",
+            Match:  "dst_charset",
+            Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from src_charset",
         }},
     }
     bls := &binlogdatapb.BinlogSource{
@@ -986,14 +986,14 @@ func TestPlayerFilters(t *testing.T) {
         data:   [][]string{{"1", "abc"}, {"4", "abc"}},
     }, {
         // test collation + filter
-        input: "insert into srcCharset values (1,'木元')",
+        input: "insert into src_charset values (1,'木元')",
         output: qh.Expect(
             "begin",
-            "insert into dstCharset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))",
+            "insert into dst_charset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))",
             "/update _vt.vreplication set pos=",
             "commit",
         ),
-        table: "dstCharset",
+        table: "dst_charset",
         data:  [][]string{{"1", "木abcxyz", "木abcxyz"}},
     }}
 
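The vplayer hunks above only rename the tables; the shape of the replication filter in these unit tests is unchanged. For orientation, a minimal sketch of a one-rule filter using the binlogdatapb types the test file already imports, where Match names the target table and Filter is the query against the source (the values here are illustrative):

    filter := &binlogdatapb.Filter{
        Rules: []*binlogdatapb.Rule{{
            // Match names the target table; Filter selects from the source.
            Match:  "dst_charset",
            Filter: "select id1, val from src_charset",
        }},
    }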
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index 1bc76a2868a..c60076e9766 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -80,16 +80,12 @@ var (
         "21",
         "22",
         "mysql_server_vault",
-        "vstream_failover",
-        "vstream_stoponreshard_true",
-        "vstream_stoponreshard_false",
-        "vstream_with_keyspaces_to_watch",
+        "vstream",
         "onlineddl_ghost",
         "onlineddl_vrepl",
         "onlineddl_vrepl_stress",
         "onlineddl_vrepl_stress_suite",
         "onlineddl_vrepl_suite",
-        "vreplication_migrate_vdiff2_convert_tz",
         "onlineddl_revert",
         "onlineddl_scheduler",
         "tabletmanager_throttler_topo",
@@ -116,13 +112,12 @@ var (
         "xb_recovery",
         "mysql80",
         "vreplication_across_db_versions",
-        "vreplication_multicell",
-        "vreplication_cellalias",
         "vreplication_basic",
+        "vreplication_cellalias",
         "vreplication_v2",
-        "vreplication_partial_movetables_basic",
-        "vreplication_partial_movetables_sequences",
+        "vreplication_partial_movetables_and_materialize",
         "vreplication_foreign_key_stress",
+        "vreplication_migrate_vdiff2_convert_tz",
         "schemadiff_vrepl",
         "topo_connection_cache",
         "vtgate_partial_keyspace",
diff --git a/test/config.json b/test/config.json
index cc68301a052..2a7b18fa299 100644
--- a/test/config.json
+++ b/test/config.json
@@ -1004,21 +1004,12 @@
             "RetryMax": 1,
             "Tags": []
         },
-        "vreplication_multicell": {
-            "File": "unused.go",
-            "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "MultiCell"],
-            "Command": [],
-            "Manual": false,
-            "Shard": "vreplication_multicell",
-            "RetryMax": 2,
-            "Tags": []
-        },
         "vreplication_materialize": {
             "File": "unused.go",
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterialize"],
             "Command": [],
             "Manual": false,
-            "Shard": "vreplication_multicell",
+            "Shard": "vreplication_partial_movetables_and_materialize",
             "RetryMax": 0,
             "Tags": []
         },
@@ -1027,7 +1018,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterializeVtctldClient"],
             "Command": [],
             "Manual": false,
-            "Shard": "vreplication_multicell",
+            "Shard": "vreplication_partial_movetables_and_materialize",
             "RetryMax": 0,
             "Tags": []
         },
@@ -1045,7 +1036,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "PartialMoveTablesBasic"],
             "Command": [],
             "Manual": false,
-            "Shard": "vreplication_partial_movetables_basic",
+            "Shard": "vreplication_partial_movetables_and_materialize",
             "RetryMax": 0,
             "Tags": []
         },
@@ -1054,7 +1045,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMultipleConcurrentVDiffs"],
             "Command": [],
             "Manual": false,
-            "Shard": "vreplication_partial_movetables_basic",
+            "Shard": "vreplication_partial_movetables_and_materialize",
             "RetryMax": 0,
             "Tags": []
         },
@@ -1108,7 +1099,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestPartialMoveTablesWithSequences"],
             "Command": [],
             "Manual": false,
-            "Shard": "vreplication_partial_movetables_sequences",
+            "Shard": "vreplication_partial_movetables_and_materialize",
             "RetryMax": 1,
             "Tags": []
         },
@@ -1126,7 +1117,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamFailover"],
             "Command": [],
             "Manual": false,
-            "Shard": "vstream_failover",
+            "Shard": "vstream",
             "RetryMax": 3,
             "Tags": []
         },
@@ -1135,7 +1126,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardTrue"],
             "Command": [],
             "Manual": false,
-            "Shard": "vstream_stoponreshard_true",
+            "Shard": "vstream",
             "RetryMax": 1,
             "Tags": []
         },
@@ -1144,7 +1135,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardFalse"],
             "Command": [],
             "Manual": false,
-            "Shard": "vstream_stoponreshard_false",
+            "Shard": "vstream",
             "RetryMax": 1,
             "Tags": []
         },
@@ -1153,7 +1144,7 @@
             "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamWithKeyspacesToWatch"],
             "Command": [],
             "Manual": false,
-            "Shard": "vstream_with_keyspaces_to_watch",
+            "Shard": "vstream",
             "RetryMax": 1,
             "Tags": []
         },