diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index 5d703099a3d..e6bccee0004 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -121,6 +121,13 @@ jobs:
           # install JUnit report formatter
           go install github.com/vitessio/go-junit-report@HEAD
 
+      - name: Install Minio
+        if: steps.skip-workflow.outputs.skip-workflow == 'false'
+        run: |
+          wget https://dl.min.io/server/minio/release/linux-amd64/minio
+          chmod +x minio
+          mv minio /usr/local/bin
+
       - name: Setup launchable dependencies
         if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
         run: |
diff --git a/.github/workflows/cluster_endtoend_vtgate_plantests.yml b/.github/workflows/cluster_endtoend_vtgate_plantests.yml
new file mode 100644
index 00000000000..93ed6a55f05
--- /dev/null
+++ b/.github/workflows/cluster_endtoend_vtgate_plantests.yml
@@ -0,0 +1,166 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Cluster (vtgate_plantests)
+on: [push, pull_request]
+concurrency:
+  group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_plantests)')
+  cancel-in-progress: true
+
+permissions: read-all
+
+env:
+  LAUNCHABLE_ORGANIZATION: "vitess"
+  LAUNCHABLE_WORKSPACE: "vitess-app"
+  GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
+jobs:
+  build:
+    name: Run endtoend tests on Cluster (vtgate_plantests)
+    runs-on: ubuntu-24.04
+
+    steps:
+      - name: Skip CI
+        run: |
+          if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+            echo "skipping CI due to the 'Skip CI' label"
+            exit 1
+          fi
+
+      - name: Check if workflow needs to be skipped
+        id: skip-workflow
+        run: |
+          skip='false'
+          if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+            skip='true'
+          fi
+          echo Skip ${skip}
+          echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+          PR_DATA=$(curl -s\
+            -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+            -H "Accept: application/vnd.github.v3+json" \
+            "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+          draft=$(echo "$PR_DATA" | jq .draft -r)
+          echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+      - name: Check out code
+        if: steps.skip-workflow.outputs.skip-workflow == 'false'
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Check for changes in relevant files
+        if: steps.skip-workflow.outputs.skip-workflow == 'false'
+        uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1
+        id: changes
+        with:
+          token: ''
+          filters: |
+            end_to_end:
+              - 'go/**/*.go'
+              - 'go/vt/sidecardb/**/*.sql'
+              - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+              - 'test.go'
+              - 'Makefile'
+              - 'build.env'
+              - 'go.sum'
+              - 'go.mod'
+              - 'proto/*.proto'
+              - 'tools/**'
+              - 'config/**'
+              - 'bootstrap.sh'
+              - '.github/workflows/cluster_endtoend_vtgate_plantests.yml'
+
+      - name: Set up Go
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+        with:
+          go-version-file: go.mod
+
+      - name: Set up python
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+        uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1
+
+      - name: Tune the OS
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+        run: |
+          # Limit local port range to not use ports that overlap with server side
+          # ports that we listen on.
+          sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+          # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+          echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+          sudo sysctl -p /etc/sysctl.conf
+
+      - name: Get dependencies
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+        run: |
+
+          # Get key to latest MySQL repo
+          sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+          # Setup MySQL 8.0
+          wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb
+          echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+          sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+          sudo apt-get -qq update
+
+          # We have to install this old version of libaio1 in case we end up testing with MySQL 5.7. See also:
+          # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501
+          curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb
+          sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb
+          # libtinfo5 is also needed for older MySQL 5.7 builds.
+          curl -L -O http://mirrors.kernel.org/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb
+          sudo dpkg -i libtinfo5_6.3-2ubuntu0.1_amd64.deb
+
+          # Install everything else we need, and configure
+          sudo apt-get -qq install -y mysql-server mysql-shell mysql-client make unzip g++ etcd-client etcd-server curl git wget eatmydata xz-utils libncurses6
+
+          sudo service mysql stop
+          sudo service etcd stop
+          sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+          sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+          go mod download
+
+          # install JUnit report formatter
+          go install github.com/vitessio/go-junit-report@HEAD
+
+      - name: Setup launchable dependencies
+        if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+        run: |
+          # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+          pip3 install --user launchable~=1.0 > /dev/null
+
+          # verify that launchable setup is all correct.
+          launchable verify || true
+
+          # Tell Launchable about the build you are producing and testing
+          launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+      - name: Run cluster endtoend test
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+        timeout-minutes: 45
+        run: |
+          # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+          export VTDATAROOT="/tmp/"
+          source build.env
+
+          set -exo pipefail
+
+          # run the tests however you normally do, then produce a JUnit XML file
+          eatmydata -- go run test.go -docker=false -follow -shard vtgate_plantests | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+      - name: Print test output and Record test result in launchable if PR is not a draft
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+        run: |
+          if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+            # send recorded tests to launchable
+            launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+          fi
+
+          # print test output
+          cat output.txt
+
+      - name: Test Summary
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+        uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4
+        with:
+          paths: "report.xml"
+          show: "fail"
diff --git a/go/test/endtoend/backup/s3/s3_builtin_test.go b/go/test/endtoend/backup/s3/s3_builtin_test.go
new file mode 100644
index 00000000000..9c83e5f8fec
--- /dev/null
+++ b/go/test/endtoend/backup/s3/s3_builtin_test.go
@@ -0,0 +1,434 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s3
+
+import (
+	"context"
+	"io"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"log"
+
+	"github.com/minio/minio-go"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/mysql/fakesqldb"
+	"vitess.io/vitess/go/mysql/replication"
+	"vitess.io/vitess/go/vt/logutil"
+	"vitess.io/vitess/go/vt/mysqlctl"
+	"vitess.io/vitess/go/vt/mysqlctl/backupstats"
+	"vitess.io/vitess/go/vt/mysqlctl/blackbox"
+	"vitess.io/vitess/go/vt/mysqlctl/s3backupstorage"
+)
+
+/*
+	These tests use Minio to emulate AWS S3. It allows us to run the tests on
+	GitHub Actions without the security burden of carrying AWS secrets in our
+	GitHub repo.
+
+	Minio is almost a drop-in replacement for AWS S3. If you want to run these
+	tests against a true AWS S3 bucket, you can do so by not running TestMain
+	and setting the 'AWS_*' environment variables to your own values.
+
+	This package and file are named 'endtoend', but this is more of an
+	integration test. However, we don't want our CI infra to mistake it for a
+	regular unit test, hence the 'endtoend' name.
+*/
+
+func TestMain(m *testing.M) {
+	f := func() int {
+		minioPath, err := exec.LookPath("minio")
+		if err != nil {
+			log.Fatalf("minio binary not found: %v", err)
+		}
+
+		dataDir, err := os.MkdirTemp("", "")
+		if err != nil {
+			log.Fatalf("could not create temporary directory: %v", err)
+		}
+		err = os.MkdirAll(dataDir, 0755)
+		if err != nil {
+			log.Fatalf("failed to create MinIO data directory: %v", err)
+		}
+
+		cmd := exec.Command(minioPath, "server", dataDir, "--console-address", ":9001")
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+
+		err = cmd.Start()
+		if err != nil {
+			log.Fatalf("failed to start MinIO: %v", err)
+		}
+		defer func() {
+			cmd.Process.Kill()
+		}()
+
+		// Local MinIO credentials
+		accessKey := "minioadmin"
+		secretKey := "minioadmin"
+		minioEndpoint := "http://localhost:9000"
+		bucketName := "test-bucket"
+		region := "us-east-1"
+
+		client, err := minio.New("localhost:9000", accessKey, secretKey, false)
+		if err != nil {
+			log.Fatalf("failed to create MinIO client: %v", err)
+		}
+		waitForMinio(client)
+
+		err = client.MakeBucket(bucketName, region)
+		if err != nil {
+			log.Fatalf("failed to create test bucket: %v", err)
+		}
+
+		// Same env variables that are used between AWS S3 and Minio
+		os.Setenv("AWS_ACCESS_KEY_ID", accessKey)
+		os.Setenv("AWS_SECRET_ACCESS_KEY", secretKey)
+		os.Setenv("AWS_BUCKET", bucketName)
+		os.Setenv("AWS_ENDPOINT", minioEndpoint)
+		os.Setenv("AWS_REGION", region)
+
+		return m.Run()
+	}
+
+	os.Exit(f())
+}
+
+func waitForMinio(client *minio.Client) {
+	for i := 0; i < 60; i++ {
+		_, err := client.ListBuckets()
+		if err == nil {
+			return
+		}
+		time.Sleep(1 * time.Second)
+	}
+	log.Fatalf("MinIO server did not become ready in time")
+}
+
+func checkEnvForS3(t *testing.T) {
+	// We never want to skip the tests if we are running on CI.
+	// We always run these tests on CI with TestMain and Minio, so there
+	// should not be a need to skip them due to missing env vars.
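+	// GITHUB_ACTIONS is set to "true" by the GitHub Actions runner itself, so
+	// it is a reliable CI marker; locally it is normally unset and the
+	// environment check below applies.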
+	if os.Getenv("GITHUB_ACTIONS") != "" {
+		return
+	}
+
+	envRequired := []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_BUCKET",
+		"AWS_ENDPOINT",
+		"AWS_REGION",
+	}
+
+	var missing []string
+	for _, s := range envRequired {
+		if os.Getenv(s) == "" {
+			missing = append(missing, s)
+		}
+	}
+	if len(missing) > 0 {
+		t.Skipf("missing AWS secrets to run this test: please set: %s", strings.Join(missing, ", "))
+	}
+}
+
+type backupTestConfig struct {
+	concurrency       int
+	addFileReturnFn   func(s3 *s3backupstorage.S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error)
+	checkCleanupError bool
+	expectedResult    mysqlctl.BackupResult
+	expectedStats     blackbox.StatSummary
+}
+
+func runBackupTest(t *testing.T, cfg backupTestConfig) {
+	checkEnvForS3(t)
+	s3backupstorage.InitFlag(s3backupstorage.FakeConfig{
+		Region:    os.Getenv("AWS_REGION"),
+		Endpoint:  os.Getenv("AWS_ENDPOINT"),
+		Bucket:    os.Getenv("AWS_BUCKET"),
+		ForcePath: true,
+	})
+
+	ctx := context.Background()
+	backupRoot, keyspace, shard, ts := blackbox.SetupCluster(ctx, t, 2, 2)
+
+	be := &mysqlctl.BuiltinBackupEngine{}
+
+	// Configure a tight deadline to force a timeout
+	oldDeadline := blackbox.SetBuiltinBackupMysqldDeadline(time.Second)
+	defer blackbox.SetBuiltinBackupMysqldDeadline(oldDeadline)
+
+	fakeStats := backupstats.NewFakeStats()
+	logger := logutil.NewMemoryLogger()
+
+	bh, err := s3backupstorage.NewFakeS3BackupHandle(ctx, t.Name(), time.Now().Format(mysqlctl.BackupTimestampFormat), logger, fakeStats)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		err := bh.AbortBackup(ctx)
+		if cfg.checkCleanupError {
+			require.NoError(t, err)
+		}
+	})
+	bh.AddFileReturnF = cfg.addFileReturnFn
+
+	// Spin up a fake daemon to be used in backups. It needs to be allowed to receive:
+	// "STOP REPLICA", "START REPLICA", in that order.
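+	// (The builtin engine pauses replication so the copied files are
+	// consistent, and resumes it once the backup is done; the fake daemon
+	// fails the test on any super query other than these two.)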
+	fakedb := fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"}
+
+	backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{
+		Logger: logger,
+		Mysqld: mysqld,
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+		},
+		Concurrency:          cfg.concurrency,
+		HookExtraEnv:         map[string]string{},
+		TopoServer:           ts,
+		Keyspace:             keyspace,
+		Shard:                shard,
+		Stats:                fakeStats,
+		MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout,
+	}, bh)
+
+	require.Equal(t, cfg.expectedResult, backupResult)
+	switch cfg.expectedResult {
+	case mysqlctl.BackupUsable:
+		require.NoError(t, err)
+	case mysqlctl.BackupUnusable, mysqlctl.BackupEmpty:
+		require.Error(t, err)
+	}
+
+	ss := blackbox.GetStats(fakeStats)
+	require.Equal(t, cfg.expectedStats.DestinationCloseStats, ss.DestinationCloseStats)
+	require.Equal(t, cfg.expectedStats.DestinationOpenStats, ss.DestinationOpenStats)
+	require.Equal(t, cfg.expectedStats.DestinationWriteStats, ss.DestinationWriteStats)
+	require.Equal(t, cfg.expectedStats.SourceCloseStats, ss.SourceCloseStats)
+	require.Equal(t, cfg.expectedStats.SourceOpenStats, ss.SourceOpenStats)
+	require.Equal(t, cfg.expectedStats.SourceReadStats, ss.SourceReadStats)
+}
+
+func TestExecuteBackupS3FailEachFileOnce(t *testing.T) {
+	runBackupTest(t, backupTestConfig{
+		concurrency: 2,
+
+		// Modify the fake S3 storage to always fail when trying to write a file for the first time
+		addFileReturnFn:   s3backupstorage.FailFirstWrite,
+		checkCleanupError: true,
+		expectedResult:    mysqlctl.BackupUsable,
+
+		// Even though we have 4 files, we expect '8' for all the values below as we re-do every file once.
+		expectedStats: blackbox.StatSummary{
+			DestinationCloseStats: 8,
+			DestinationOpenStats:  8,
+			DestinationWriteStats: 8,
+			SourceCloseStats:      8,
+			SourceOpenStats:       8,
+			SourceReadStats:       8,
+		},
+	})
+}
+
+func TestExecuteBackupS3FailEachFileTwice(t *testing.T) {
+	runBackupTest(t, backupTestConfig{
+		concurrency: 1,
+
+		// Modify the fake S3 storage to fail every attempt to write a file
+		addFileReturnFn: s3backupstorage.FailAllWrites,
+
+		// If the code works as expected by this test, no files will be created on S3 and
+		// AbortBackup will fail; for this reason, we don't check its error return.
+		// We still call AbortBackup anyway: in the event the code misbehaves and some
+		// files were created by mistake, we delete them.
+		checkCleanupError: false,
+		expectedResult:    mysqlctl.BackupUnusable,
+
+		// All stats here must be equal to 5: we have four files, we go through each of them, and they all fail.
+		// The logic decides to retry each file once; we retry the first failed file and it fails again,
+		// but since it has reached the retry limit the backup fails anyway, so we don't
+		// retry the other 3 files.
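+		// Put differently, with 4 files and concurrency 1: 4 first attempts,
+		// all failing, plus 1 retry of the first failed file = 5 opens, reads
+		// and closes on each side.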
+		expectedStats: blackbox.StatSummary{
+			DestinationCloseStats: 5,
+			DestinationOpenStats:  5,
+			DestinationWriteStats: 5,
+			SourceCloseStats:      5,
+			SourceOpenStats:       5,
+			SourceReadStats:       5,
+		},
+	})
+}
+
+type restoreTestConfig struct {
+	readFileReturnFn func(s3 *s3backupstorage.S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error)
+	expectSuccess    bool
+	expectedStats    blackbox.StatSummary
+}
+
+func runRestoreTest(t *testing.T, cfg restoreTestConfig) {
+	checkEnvForS3(t)
+	s3backupstorage.InitFlag(s3backupstorage.FakeConfig{
+		Region:    os.Getenv("AWS_REGION"),
+		Endpoint:  os.Getenv("AWS_ENDPOINT"),
+		Bucket:    os.Getenv("AWS_BUCKET"),
+		ForcePath: true,
+	})
+
+	ctx := context.Background()
+	backupRoot, keyspace, shard, ts := blackbox.SetupCluster(ctx, t, 2, 2)
+
+	fakeStats := backupstats.NewFakeStats()
+	logger := logutil.NewMemoryLogger()
+
+	be := &mysqlctl.BuiltinBackupEngine{}
+	dirName := time.Now().Format(mysqlctl.BackupTimestampFormat)
+	name := t.Name() + "-" + strconv.Itoa(int(time.Now().Unix()))
+	bh, err := s3backupstorage.NewFakeS3BackupHandle(ctx, name, dirName, logger, fakeStats)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, bh.AbortBackup(ctx))
+	})
+
+	// Spin up a fake daemon to be used in backups. It needs to be allowed to receive:
+	// "STOP REPLICA", "START REPLICA", in that order.
+	fakedb := fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"}
+
+	backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{
+		Logger: logutil.NewConsoleLogger(),
+		Mysqld: mysqld,
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+		},
+		Stats:                backupstats.NewFakeStats(),
+		Concurrency:          1,
+		HookExtraEnv:         map[string]string{},
+		TopoServer:           ts,
+		Keyspace:             keyspace,
+		Shard:                shard,
+		MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout,
+	}, bh)
+
+	require.NoError(t, err)
+	require.Equal(t, mysqlctl.BackupUsable, backupResult)
+
+	// Backup is done, let's move on to the restore now
+
+	restoreBh, err := s3backupstorage.NewFakeS3RestoreHandle(ctx, name, logger, fakeStats)
+	require.NoError(t, err)
+	restoreBh.ReadFileReturnF = cfg.readFileReturnFn
+
+	fakedb = fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"}
+
+	restoreParams := mysqlctl.RestoreParams{
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+			BinLogPath:            path.Join(backupRoot, "binlog"),
+			RelayLogPath:          path.Join(backupRoot, "relaylog"),
+			RelayLogIndexPath:     path.Join(backupRoot, "relaylogindex"),
+			RelayLogInfoPath:      path.Join(backupRoot, "relayloginfo"),
+		},
+		Logger:               logger,
+		Mysqld:               mysqld,
+		Concurrency:          1,
+		HookExtraEnv:         map[string]string{},
+		DeleteBeforeRestore:  false,
+		DbName:               "test",
+		Keyspace:             "test",
+		Shard:                "-",
+		StartTime:            time.Now(),
+		RestoreToPos:         replication.Position{},
+		RestoreToTimestamp:   time.Time{},
+		DryRun:               false,
+		Stats:                fakeStats,
+		MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout,
+	}
+
+	// Run the restore. Whether it succeeds or fails is driven by the test config.
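+	// (ExecuteRestore returns the manifest of the backup it restored; on
+	// success we only assert that it is non-nil.)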
+	bm, err := be.ExecuteRestore(ctx, restoreParams, restoreBh)
+
+	if cfg.expectSuccess {
+		assert.NoError(t, err)
+		assert.NotNil(t, bm)
+	} else {
+		assert.Error(t, err)
+	}
+
+	ss := blackbox.GetStats(fakeStats)
+	require.Equal(t, cfg.expectedStats.DestinationCloseStats, ss.DestinationCloseStats)
+	require.Equal(t, cfg.expectedStats.DestinationOpenStats, ss.DestinationOpenStats)
+	require.Equal(t, cfg.expectedStats.DestinationWriteStats, ss.DestinationWriteStats)
+	require.Equal(t, cfg.expectedStats.SourceCloseStats, ss.SourceCloseStats)
+	require.Equal(t, cfg.expectedStats.SourceOpenStats, ss.SourceOpenStats)
+	require.Equal(t, cfg.expectedStats.SourceReadStats, ss.SourceReadStats)
+}
+
+func TestExecuteRestoreS3FailEachFileOnce(t *testing.T) {
+	runRestoreTest(t, restoreTestConfig{
+		readFileReturnFn: s3backupstorage.FailFirstRead,
+		expectSuccess:    true,
+		expectedStats: blackbox.StatSummary{
+			DestinationCloseStats: 8,
+			DestinationOpenStats:  8,
+			DestinationWriteStats: 4, // 4, because on the first attempt, we fail to read before writing to the filesystem
+			SourceCloseStats:      8,
+			SourceOpenStats:       8,
+			SourceReadStats:       8,
+		},
+	})
+}
+
+func TestExecuteRestoreS3FailEachFileTwice(t *testing.T) {
+	runRestoreTest(t, restoreTestConfig{
+		readFileReturnFn: s3backupstorage.FailAllReadExpectManifest,
+		expectSuccess:    false,
+
+		// Everything except destination writes must be equal to 5:
+		// +1 for every file on the first attempt (= 4), and +1 for the first file we try for the second time.
+		// Since we fail early as soon as a second-attempt file fails, we won't see a value above 5.
+		expectedStats: blackbox.StatSummary{
+			DestinationCloseStats: 5,
+			DestinationOpenStats:  5,
+			DestinationWriteStats: 0, // 0, because on both attempts we fail to read before writing to the filesystem
+			SourceCloseStats:      5,
+			SourceOpenStats:       5,
+			SourceReadStats:       5,
+		},
+	})
+}
diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index 4e018986100..c7a09c70d13 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) {
 	//    - Take a Second Backup
 	//    - Bring up a second replica, and restore from the second backup
 	//    - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2})
 
@@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) {
 	//    - Take a Second Backup
 	//    - Bring up a second replica, and restore from the second backup
 	//    - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	// Reset the tablet object values in order to init the tablet in the next step.
 	primary.VttabletProcess.ServingStatus = "NOT_SERVING"
diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go
index 367956c9827..6e1840b2979 100644
--- a/go/test/endtoend/backup/vtbackup/main_test.go
+++ b/go/test/endtoend/backup/vtbackup/main_test.go
@@ -52,7 +52,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
index 7c03b776c74..86b2612a044 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
@@ -405,7 +405,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe
 	},
 	// }
 
-	defer cluster.PanicHandler(t)
 	// setup cluster for the testing
 	code, err := LaunchCluster(setupType, streamMode, stripes, cDetails)
 	require.Nilf(t, err, "setup failed with status code %d", code)
@@ -1507,7 +1506,6 @@ func getLastBackup(t *testing.T) string {
 
 func TestBackupEngineSelector(t *testing.T) {
 	defer setDefaultCommonArgs()
-	defer cluster.PanicHandler(t)
 
 	// launch the cluster with xtrabackup as the default engine
 	code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
@@ -1548,7 +1546,6 @@ func TestBackupEngineSelector(t *testing.T) {
 
 func TestRestoreAllowedBackupEngines(t *testing.T) {
 	defer setDefaultCommonArgs()
-	defer cluster.PanicHandler(t)
 
 	backupMsg := "right after xtrabackup backup"
diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
index 4c84c3e63bc..1b04dbc7aab 100644
--- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
+++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
@@ -95,7 +95,6 @@ func waitForReplica(t *testing.T, replicaIndex int) int {
 // in between, it makes writes to the database, and takes notes: what data was available in what backup.
 // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup.
 func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 
 		// setup cluster for the testing
@@ -339,7 +338,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase)
 // ExecTestIncrementalBackupAndRestoreToPos
 func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	var lastInsertedRowTimestamp time.Time
 	insertRowOnPrimary := func(t *testing.T, hint string) {
@@ -605,7 +603,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes
 // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on
 // one another.
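 // (That is, an incremental backup taken via one tablet must resume from the
 // position recorded by the previous backup, even when that backup was taken
 // via the other tablet.)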
 func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 
 		// setup cluster for the testing
diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go
index 07e8d687f4e..6e8f901a245 100644
--- a/go/test/endtoend/cellalias/cell_alias_test.go
+++ b/go/test/endtoend/cellalias/cell_alias_test.go
@@ -90,7 +90,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -232,7 +231,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestAlias(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
@@ -296,7 +294,6 @@ func TestAlias(t *testing.T) {
 }
 
 func TestAddAliasWhileVtgateUp(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go
index b89e007b4f2..2ac46a8d145 100644
--- a/go/test/endtoend/cluster/cluster_process.go
+++ b/go/test/endtoend/cluster/cluster_process.go
@@ -1043,7 +1043,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context,
 // Teardown brings down the cluster by invoking teardown for individual processes
 func (cluster *LocalProcessCluster) Teardown() {
-	PanicHandler(nil)
 	cluster.mx.Lock()
 	defer cluster.mx.Unlock()
 	if cluster.teardownCompleted {
diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go
index 061e632dde7..cfc2071a746 100644
--- a/go/test/endtoend/cluster/cluster_util.go
+++ b/go/test/endtoend/cluster/cluster_util.go
@@ -126,15 +126,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte
 	VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test")
 }
 
-// PanicHandler handles the panic in the testcase.
-func PanicHandler(t testing.TB) {
-	err := recover()
-	if t == nil {
-		return
-	}
-	require.Nilf(t, err, "panic occured in testcase %v", t.Name())
-}
-
 // ListBackups lists the backups present in a shard
 func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
 	output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName)
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index c01f7c6e93b..1290156a1cd 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -132,6 +132,7 @@ func (vtgate *VtgateProcess) Setup() (err error) {
 		return err
 	}
 	vtgate.proc.Stderr = errFile
+	vtgate.ErrorLog = errFile.Name()
 
 	vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...)
 	vtgate.proc.Env = append(vtgate.proc.Env, DefaultVttestEnv)
diff --git a/go/test/endtoend/clustertest/add_keyspace_test.go b/go/test/endtoend/clustertest/add_keyspace_test.go
index edee87d035e..b8422b52eb3 100644
--- a/go/test/endtoend/clustertest/add_keyspace_test.go
+++ b/go/test/endtoend/clustertest/add_keyspace_test.go
@@ -61,7 +61,6 @@ primary key (id)
 )
 
 func TestAddKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
 		log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err)
 		t.Fatal(err)
diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go
index 5239d960c47..f47a002a3cf 100644
--- a/go/test/endtoend/clustertest/etcd_test.go
+++ b/go/test/endtoend/clustertest/etcd_test.go
@@ -24,12 +24,9 @@ import (
 
 	"github.com/stretchr/testify/require"
 	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestEtcdServer(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Confirm the basic etcd cluster health.
 	etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort)
diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go
index 35da40a3edb..3fc2524208b 100644
--- a/go/test/endtoend/clustertest/main_test.go
+++ b/go/test/endtoend/clustertest/main_test.go
@@ -60,7 +60,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go
index c61f7820bb7..18d06cf3299 100644
--- a/go/test/endtoend/clustertest/vtctld_test.go
+++ b/go/test/endtoend/clustertest/vtctld_test.go
@@ -30,8 +30,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 var (
@@ -44,7 +42,6 @@ var (
 )
 
 func TestVtctldProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
 	testURL(t, url, "keyspace url")
diff --git a/go/test/endtoend/clustertest/vtgate_test.go b/go/test/endtoend/clustertest/vtgate_test.go
index 2f72682a391..264b292f482 100644
--- a/go/test/endtoend/clustertest/vtgate_test.go
+++ b/go/test/endtoend/clustertest/vtgate_test.go
@@ -32,11 +32,9 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVtgateProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
 	ctx := context.Background()
 	conn, err := mysql.Connect(ctx, &vtParams)
diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go
index 5e7d5e27182..86fc2a6983c 100644
--- a/go/test/endtoend/clustertest/vttablet_test.go
+++ b/go/test/endtoend/clustertest/vttablet_test.go
@@ -25,12 +25,9 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVttabletProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort
 	testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url")
 	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort))
@@ -48,7 +45,6 @@ func TestVttabletProcess(t *testing.T) {
 }
 
 func TestDeleteTablet(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
 	require.NotNil(t, primary)
 	_, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias)
diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
index 7dea6cf525f..460ae310d7f 100644
--- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
+++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
@@ -42,7 +42,6 @@ var (
 
 // This test makes sure that we can use SSL replication with Vitess
 func TestSecure(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testReplicationBase(t, true)
 	testReplicationBase(t, false)
 }
diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
index 1363e07b2cd..86c847125a7 100644
--- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
+++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
@@ -102,7 +102,6 @@ var (
 )
 
 func TestSecureTransport(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	flag.Parse()
 
 	// initialize cluster
diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go
index 2a665c66214..f65301b9bb4 100644
--- a/go/test/endtoend/keyspace/keyspace_test.go
+++ b/go/test/endtoend/keyspace/keyspace_test.go
@@ -81,7 +81,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -167,7 +166,6 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
 }
 
 func TestGetSrvKeyspaceNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
 	require.Nil(t, err)
 
@@ -180,7 +178,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) {
 }
 
 func TestGetSrvKeyspacePartitions(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
 	otherShardRefFound := false
 	for _, partition := range shardedSrvKeyspace.Partitions {
@@ -209,20 +206,17 @@ func TestGetSrvKeyspacePartitions(t *testing.T) {
 }
 
 func TestShardNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell)
 	require.NoError(t, err)
 	require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell)
 }
 
 func TestGetKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	_, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName)
 	require.Nil(t, err)
 }
 
 func TestDeleteKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	_ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName)
 	_ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0")
 	_ = clusterForKSTest.InitTablet(&cluster.Vttablet{
@@ -353,7 +347,6 @@ func TestDeleteKeyspace(t *testing.T) {
 }
 */
 func TestShardCountForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardCountForKeyspace(t, keyspaceUnshardedName, 1)
 	testShardCountForKeyspace(t, keyspaceShardedName, 2)
 }
@@ -370,7 +363,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count int) {
 }
 
 func TestShardNameForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"})
 	testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"})
 }
@@ -389,7 +381,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string
 }
 
 func TestKeyspaceToShardName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var id []byte
 	srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
diff --git a/go/test/endtoend/messaging/main_test.go b/go/test/endtoend/messaging/main_test.go
index 49477ebe631..c654869316b 100644
--- a/go/test/endtoend/messaging/main_test.go
+++ b/go/test/endtoend/messaging/main_test.go
@@ -104,7 +104,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go
index 7e1190c16bb..e91a8dcc335 100644
--- a/go/test/endtoend/messaging/message_test.go
+++ b/go/test/endtoend/messaging/message_test.go
@@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) {
 
 // TestReparenting checks the client connection count after reparenting.
 func TestReparenting(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	name := "sharded_message"
 
 	ctx := context.Background()
@@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) {
 
 // TestConnection validates the connection count and message streaming.
 func TestConnection(t *testing.T) {
-	defer cluster.PanicHandler(t)
 
 	name := "sharded_message"
 
@@ -494,7 +492,6 @@ func TestConnection(t *testing.T) {
 }
 
 func testMessaging(t *testing.T, name, ks string) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	stream, err := VtgateGrpcConn(ctx, clusterInstance)
 	require.Nil(t, err)
diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go
index f93724fa4a8..070114da420 100644
--- a/go/test/endtoend/mysqlctl/mysqlctl_test.go
+++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go
@@ -40,7 +40,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -139,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 	primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID)
@@ -148,7 +146,6 @@ func TestRestart(t *testing.T) {
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.NoError(t, err)
diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go
index beb155830e2..432beb0c6d5 100644
--- a/go/test/endtoend/mysqlctld/mysqlctld_test.go
+++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -141,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctldProcess.Stop()
 	require.Nil(t, err)
 	require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...")
@@ -151,7 +149,6 @@ func TestRestart(t *testing.T) {
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.Nil(t, err, "error should be nil")
diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go
index 18b169e33d7..20da69e18e8 100644
--- a/go/test/endtoend/mysqlserver/main_test.go
+++ b/go/test/endtoend/mysqlserver/main_test.go
@@ -61,7 +61,6 @@ END;
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	// setting grpc max size
diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go
index 6b691582c66..ee6e973593b 100644
--- a/go/test/endtoend/mysqlserver/mysql_server_test.go
+++ b/go/test/endtoend/mysqlserver/mysql_server_test.go
@@ -35,14 +35,12 @@ import (
 	"vitess.io/vitess/go/mysql/sqlerror"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 
 	_ "github.com/go-sql-driver/mysql"
 )
 
 // TestMultiStmt checks that multiStatements=True and multiStatements=False work properly.
 func TestMultiStatement(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	// connect database with multiStatements=True
@@ -70,7 +68,6 @@ func TestMultiStatement(t *testing.T) {
 
 // TestLargeComment adds a large comment in an insert stmt and validates the insert process.
 func TestLargeComment(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -89,7 +86,6 @@ func TestLargeComment(t *testing.T) {
 
 // TestInsertLargerThenGrpcLimit inserts a blob larger than the grpc limit and verifies the error.
 func TestInsertLargerThenGrpcLimit(t *testing.T) {
-	defer cluster.PanicHandler(t)
 
 	ctx := context.Background()
 
@@ -109,7 +105,6 @@ func TestInsertLargerThenGrpcLimit(t *testing.T) {
 
 // TestTimeout executes sleep(5) with query_timeout of 1 second, and verifies the error.
 func TestTimeout(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -125,7 +120,6 @@ func TestTimeout(t *testing.T) {
 
 // TestInvalidField tries to fetch an invalid column and verifies the error.
 func TestInvalidField(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -141,7 +135,6 @@ func TestInvalidField(t *testing.T) {
 
 // TestWarnings validates the behaviour of SHOW WARNINGS.
 func TestWarnings(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -183,7 +176,6 @@ func TestWarnings(t *testing.T) {
 
 // TestSelectWithUnauthorizedUser verifies that an unauthorized user
 // is not able to read from the table.
 func TestSelectWithUnauthorizedUser(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	tmpVtParam := vtParams
@@ -202,7 +194,6 @@ func TestSelectWithUnauthorizedUser(t *testing.T) {
 
 // TestPartitionedTable validates that partitioned tables are recognized by schema engine
 func TestPartitionedTable(t *testing.T) {
-	defer cluster.PanicHandler(t)
 
 	tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
index 035789e4b87..ee8141860f4 100644
--- a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
+++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
@@ -120,7 +120,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -197,7 +196,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestOnlineDDLFlow(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	require.NotNil(t, clusterInstance)
diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
index af88806fb26..a13077ef87b 100644
--- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
+++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
@@ -137,7 +137,6 @@ type revertibleTestCase struct {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -204,7 +203,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestRevertSchemaChanges(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
index 53a1c8137fd..5f6423b2556 100644
--- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
+++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
@@ -239,7 +239,6 @@ func waitForMessage(t *testing.T, uuid string, messageSubstring string) {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -321,7 +320,6 @@ func TestSchedulerSchemaChanges(t *testing.T) {
 }
 
 func testScheduler(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
@@ -1593,7 +1591,6 @@ func testScheduler(t *testing.T) {
 }
 
 func testSingleton(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
@@ -1844,7 +1841,6 @@ DROP TABLE IF EXISTS stress_test
 	})
 }
 func testDeclarative(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
@@ -2516,7 +2512,6 @@ func testDeclarative(t *testing.T) {
 }
 
 func testForeignKeys(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var (
 		createStatements = []string{
diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
index 92dfa2b0c4a..a7c38527152 100644
--- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
+++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
@@ -160,7 +160,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -224,7 +223,6 @@ func TestMain(m *testing.M) {
 }
 func TestVreplSchemaChanges(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 2, len(shards))
diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
index 88c145dc40c..f7b222c175d 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
@@ -159,7 +159,6 @@ func nextOpOrder() int64 {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -226,7 +225,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestVreplMiniStressSchemaChanges(t *testing.T) {
-	defer cluster.PanicHandler(t)
 
 	ctx := context.Background()
diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
index 85b3585beb4..1e52db38bd0 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
@@ -407,7 +407,6 @@ func mysqlParams() *mysql.ConnParams {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -478,7 +477,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestVreplStressSchemaChanges(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
index 972421c96da..4a2f7f1a3ce 100644
--- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
@@ -67,7 +67,6 @@ const (
 
 // Use $VREPL_SUITE_TEST_FILTER environment variable to filter tests by name.
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	testsFilter = os.Getenv(testFilterEnvVar)
@@ -133,7 +132,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestVreplSuiteSchemaChanges(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards := clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
diff --git a/go/test/endtoend/preparestmt/main_test.go b/go/test/endtoend/preparestmt/main_test.go
index 018e9d266fd..0e067062c94 100644
--- a/go/test/endtoend/preparestmt/main_test.go
+++ b/go/test/endtoend/preparestmt/main_test.go
@@ -162,7 +162,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go
index 24fb58bff81..5768c6eec7a 100644
--- a/go/test/endtoend/preparestmt/stmt_methods_test.go
+++ b/go/test/endtoend/preparestmt/stmt_methods_test.go
@@ -27,20 +27,16 @@ import (
 	"github.com/icrowley/fake"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 // TestSelect simply selects the data without any condition.
 func TestSelect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	selectWhere(t, dbo, "")
 }
 
 func TestSelectDatabase(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	prepare, err := dbo.Prepare("select database()")
@@ -58,7 +54,6 @@ func TestSelectDatabase(t *testing.T) {
 
 // TestInsertUpdateDelete validates all insert, update and
 // delete method on prepared statements.
 func TestInsertUpdateDelete(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// prepare insert statement
@@ -134,7 +129,6 @@ func testReplica(t *testing.T) {
 
 // testcount validates inserted rows count with expected count.
 func testcount(t *testing.T, dbo *sql.DB, except int) {
-	defer cluster.PanicHandler(t)
 	r, err := dbo.Query("SELECT count(1) FROM " + tableName)
 	require.Nil(t, err)
 
@@ -148,7 +142,6 @@ func testcount(t *testing.T, dbo *sql.DB, except int) {
 
 // TestAutoIncColumns test insertion of row without passing
 // the value of auto increment columns (here it is id).
 func TestAutoIncColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// insert a row without id
@@ -227,7 +220,6 @@ func reconnectAndTest(t *testing.T) {
 
 // TestColumnParameter query database using column
 // parameter.
 func TestColumnParameter(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -267,7 +259,6 @@ func TestColumnParameter(t *testing.T) {
 
 // TestWrongTableName query database using invalid
 // tablename and validate error.
 func TestWrongTableName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	execWithError(t, dbo, []uint16{1146}, "select * from teseting_table;")
@@ -319,7 +310,6 @@ func getStringToString(x sql.NullString) string {
 }
 
 func TestSelectDBA(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -381,7 +371,6 @@ func TestSelectDBA(t *testing.T) {
 }
 
 func TestSelectLock(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -417,7 +406,6 @@ func TestSelectLock(t *testing.T) {
 }
 
 func TestShowColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -438,7 +426,6 @@ func TestShowColumns(t *testing.T) {
 }
 
 func TestBinaryColumn(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t, "interpolateParams=false")
 	defer dbo.Close()
diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
index f2a76662918..3bb2399737e 100644
--- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go
+++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
@@ -126,7 +126,6 @@ var (
 // - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards
 // - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards.
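 // In short: each restore target time corresponds to a known set of rows, and
 // each restore asserts exactly that set on every shard.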
 func TestPITRRecovery(t *testing.T) {
-	defer cluster.PanicHandler(nil)
 	initializeCluster(t)
 
 	defer clusterInstance.Teardown()
@@ -525,7 +524,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 	tablet.MysqlctlProcess = *mysqlctlProcess
 	extraArgs := []string{"--db-credentials-file", dbCredentialFile}
 	tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword
-	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.MysqlctlProcess.ExtraArgs = extraArgs
 	err = tablet.MysqlctlProcess.Start()
 	require.NoError(t, err)
@@ -545,6 +543,7 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 		clusterInstance.VtTabletExtraArgs,
 		clusterInstance.DefaultCharset)
 	tablet.Alias = tablet.VttabletProcess.TabletPath
+	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.VttabletProcess.SupportsBackup = true
 	tablet.VttabletProcess.Keyspace = restoreKeyspaceName
 	tablet.VttabletProcess.ExtraArgs = []string{
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 1ebb7c2647f..ae6b152271b 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -72,7 +72,6 @@ var (
 
 // TestMainImpl creates cluster for unsharded recovery testing.
 func TestMainImpl(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
@@ -201,7 +200,6 @@ func TestMainImpl(m *testing.M) {
 //
 // 7. check that vtgate queries work correctly
 func TestRecoveryImpl(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	defer tabletsTeardown()
 	verifyInitialReplication(t)
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index 584bccfdfb7..0d2eb8935d2 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -31,7 +31,6 @@ import (
 )
 
 func TestTrivialERS(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -56,7 +55,6 @@ func TestTrivialERS(t *testing.T) {
 }
 
 func TestReparentIgnoreReplicas(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -98,7 +96,6 @@ func TestReparentIgnoreReplicas(t *testing.T) {
 }
 
 func TestReparentDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -134,7 +131,6 @@ func TestReparentDownPrimary(t *testing.T) {
 }
 
 func TestReparentNoChoiceDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -170,7 +166,6 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) {
 
 func TestSemiSyncSetupCorrectly(t *testing.T) {
 	t.Run("semi-sync enabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -198,7 +193,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 	})
 
 	t.Run("semi-sync disabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "none")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -228,7 +222,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 
 // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary
 func TestERSPromoteRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -256,7 +249,6 @@ func TestERSPromoteRdonly(t *testing.T) {
 
 // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set
 func TestERSPreventCrossCellPromotion(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -279,7 +271,6 @@ func TestERSPreventCrossCellPromotion(t *testing.T) {
 
 // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have
 // caught up to it by pulling transactions from it
 func TestPullFromRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -351,7 +342,6 @@ func TestPullFromRdonly(t *testing.T) {
 // replicas which do not have any replication status and also succeeds if the io thread
 // is stopped on the primary elect.
 func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -451,7 +441,6 @@ func TestERSForInitialization(t *testing.T) {
 }
 
 func TestRecoverWithMultipleFailures(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -479,7 +468,6 @@ func TestRecoverWithMultipleFailures(t *testing.T) {
 
 // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting
 // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs
 func TestERSFailFast(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -519,7 +507,6 @@ func TestERSFailFast(t *testing.T) {
 
 // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped.
 // If there are more than 1, we also fail.
func TestReplicationStopped(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index b6f34af7294..a041ca04c68 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -36,7 +36,6 @@ import ( // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag. func TestRecoverWithMultipleVttabletFailures(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -68,7 +67,6 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { // and ERS succeeds. func TestSingleReplicaERS(t *testing.T) { // Set up a cluster with none durability policy - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "none") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -104,7 +102,6 @@ func TestSingleReplicaERS(t *testing.T) { // TestTabletRestart tests that a running tablet can be restarted and everything is still fine func TestTabletRestart(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -117,7 +114,6 @@ func TestTabletRestart(t *testing.T) { // Test ensures that ChangeTabletType works even when semi-sync plugins are not loaded. func TestChangeTypeWithoutSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "none") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -163,7 +159,6 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) {
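
The bulk of this change is mechanical: every endtoend suite drops its deferred cluster.PanicHandler call (called with t inside tests and with nil inside TestMain). The helper's definition is not part of this diff, so the following is only a plausible reconstruction of the pattern being deleted, assuming the helper did nothing more than recover a panic and report it as a test failure:

// Hypothetical reconstruction, for context only; the real body is not shown in this diff.
// Go's testing runner already fails a test and prints the goroutine stack when the
// test panics, which would make a wrapper like this redundant.
func PanicHandler(t testing.TB) {
	err := recover()
	if err == nil {
		return
	}
	if t == nil {
		panic(err) // no test context (TestMain): re-panic so the crash stays visible
	}
	t.Errorf("test panicked: %v", err) // marks the test failed, but hides the original stack
}

Under that assumption, the removals change no behavior: an unrecovered panic still fails the test, now with the runtime's full stack trace instead of a recovered error value.

// TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a // write that happens when PromoteReplica is called.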
func TestERSWithWriteInPromoteReplica(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -181,7 +176,6 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) { } func TestBufferingWithMultipleDisruptions(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go index 2d89893569d..91471b1cebb 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go @@ -28,7 +28,6 @@ import ( ) func TestReparentGracefulRangeBased(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() utils.ShardName = "0000000000000000-ffffffffffffffff" diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index d3907b0bc5b..94e37d715f4 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -36,7 +36,6 @@ import ( ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -48,7 +47,6 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -62,7 +60,6 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -85,7 +82,6 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -112,7 +108,6 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -130,7 +125,6 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -178,14 +172,12 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer 
utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -285,7 +277,6 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -332,7 +323,6 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -399,7 +389,6 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "cross_cell") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -439,7 +428,6 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88e3d6c09fa..c2dafb8589f 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go index 4364836841b..c6b59fd6372 100644 --- a/go/test/endtoend/reparent/prssettingspool/main_test.go +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index 07cf4a7abc8..df9bf192e65 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -33,7 +33,6 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) { if ver != 21 { t.Skip("We only want to run this test for v21 release") } - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index b4ecb367e8b..c850e22945c 100644 --- 
a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -68,7 +68,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -132,7 +131,6 @@ func TestMain(m *testing.M) { } func TestSchemadiffSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -275,7 +273,6 @@ func testSingle(t *testing.T, testName string) { } // func TestRandomSchemaChanges(t *testing.T) { -// defer cluster.PanicHandler(t) // hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore} // // count := 20 diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 192355fa6ef..3e5f2b3add7 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -102,7 +101,6 @@ func TestMain(m *testing.M) { } func TestShardedKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) shard1 := clusterInstance.Keyspaces[0].Shards[0] shard2 := clusterInstance.Keyspaces[0].Shards[1] diff --git a/go/test/endtoend/stress/stress_test.go b/go/test/endtoend/stress/stress_test.go index 30a5ee69c1a..1bf716274d4 100644 --- a/go/test/endtoend/stress/stress_test.go +++ b/go/test/endtoend/stress/stress_test.go @@ -42,7 +42,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -84,7 +83,6 @@ func TestMain(m *testing.M) { // The stressor is started on its own goroutine while the end-to-end test // is executed on the same cluster. 
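
The comment above describes the pattern this suite relies on: background load runs on its own goroutine against the same cluster that the test queries. A minimal sketch of that shape, assuming a paired Start/Stop API on the stress package (only DefaultConfig and the ConnParams field appear in this diff; the other names are illustrative):

func TestUnderBackgroundLoad(t *testing.T) {
	cfg := stress.DefaultConfig
	cfg.ConnParams = &vtParams // aim the stressor at the same vtgate the test uses

	s := stress.Start(t, cfg) // assumed API: spawns the load-generating goroutine(s)
	defer s.Stop()            // assumed API: halts the load and reports stressor errors

	// ... the end-to-end assertions run here, concurrently with the load ...
}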
func TestSimpleStressTest(t *testing.T) { - defer cluster.PanicHandler(t) cfg := stress.DefaultConfig cfg.ConnParams = &vtParams diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index ca4fe5f6094..920e2193453 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -272,7 +272,6 @@ type BufferingTest struct { } func (bt *BufferingTest) Test(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := bt.createCluster() if exitCode != 0 { t.Fatal("failed to start cluster") diff --git a/go/test/endtoend/tabletgateway/main_test.go b/go/test/endtoend/tabletgateway/main_test.go index 354be6969d3..cf179c49845 100644 --- a/go/test/endtoend/tabletgateway/main_test.go +++ b/go/test/endtoend/tabletgateway/main_test.go @@ -61,7 +61,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go index 1f4f8758e16..d9c87fdc7f3 100644 --- a/go/test/endtoend/tabletgateway/vtgate_test.go +++ b/go/test/endtoend/tabletgateway/vtgate_test.go @@ -40,7 +40,6 @@ import ( ) func TestVtgateHealthCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -54,7 +53,6 @@ func TestVtgateHealthCheck(t *testing.T) { } func TestVtgateReplicationStatusCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -104,7 +102,6 @@ func TestVtgateReplicationStatusCheck(t *testing.T) { } func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -180,7 +177,6 @@ func retryNTimes(t *testing.T, maxRetries int, f func() bool) { func TestReplicaTransactions(t *testing.T) { // TODO(deepthi): this test seems to depend on previous test. Fix tearDown so that tests are independent - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) ctx := context.Background() @@ -287,7 +283,6 @@ func TestReplicaTransactions(t *testing.T) { // TestStreamingRPCStuck tests that StreamExecute calls don't get stuck on the vttablets if a client stops reading from a stream.
func TestStreamingRPCStuck(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtConn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index d5d946a164c..67127ab740f 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -44,7 +43,6 @@ var ( // TabletCommands tests the basic tablet commands func TestTabletCommands(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -185,7 +183,6 @@ func assertExecuteMultiFetch(t *testing.T, qr string) { func TestHook(t *testing.T) { // test a regular program works - defer cluster.PanicHandler(t) runHookAndAssert(t, []string{ "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--flag1", "--param1=hello"}, 0, false, "") @@ -226,7 +223,6 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus int64, expec func TestShardReplicationFix(t *testing.T) { // make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica - defer cluster.PanicHandler(t) result, err := clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") require.NotNil(t, result[cell], "result should not be Nil") @@ -250,7 +246,6 @@ func TestShardReplicationFix(t *testing.T) { } func TestGetSchema(t *testing.T) { - defer cluster.PanicHandler(t) res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index 0c6e056af36..e692bf94de4 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -33,7 +33,6 @@ import ( func TestTopoCustomRule(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/lock_unlock_test.go b/go/test/endtoend/tabletmanager/lock_unlock_test.go index 79286438698..f636f52c353 100644 --- a/go/test/endtoend/tabletmanager/lock_unlock_test.go +++ b/go/test/endtoend/tabletmanager/lock_unlock_test.go @@ -30,12 +30,10 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) // TestLockAndUnlock tests the lock ability by locking a replica and asserting it does not see changes func TestLockAndUnlock(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -76,7 +74,6 @@ func TestLockAndUnlock(t *testing.T) { // TestStartReplicationUntilAfter tests by writing three rows, noting the gtid after each, and then replaying them one by one func TestStartReplicationUntilAfter(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -130,7 +127,6 @@ func TestStartReplicationUntilAfter(t *testing.T) { // TestLockAndTimeout tests that the lock times out and updates can 
be seen after timeout func TestLockAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() primaryConn, err := mysql.Connect(ctx, &primaryTabletParams) diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go index 019c00d5b84..b613f061522 100644 --- a/go/test/endtoend/tabletmanager/main_test.go +++ b/go/test/endtoend/tabletmanager/main_test.go @@ -79,7 +79,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index 297e5540fac..aaff0ff00a0 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -69,7 +69,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestRepeatedInitShardPrimary(t *testing.T) { - defer cluster.PanicHandler(t) // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary @@ -155,7 +153,6 @@ func TestRepeatedInitShardPrimary(t *testing.T) { } func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { - defer cluster.PanicHandler(t) // Test that PTS timestamp is set when we restart the PRIMARY vttablet. // PTS = PrimaryTermStart. // See StreamHealthResponse.primary_term_start_timestamp for details. diff --git a/go/test/endtoend/tabletmanager/qps_test.go b/go/test/endtoend/tabletmanager/qps_test.go index 0611feada12..0ce41f04a63 100644 --- a/go/test/endtoend/tabletmanager/qps_test.go +++ b/go/test/endtoend/tabletmanager/qps_test.go @@ -24,12 +24,10 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestQPS(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go index df8c1f26c4e..75c6e8d4cc8 100644 --- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index 685c361cef7..b1abec3a6b7 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -83,7 +83,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index bf3747fde29..061528682d2 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -39,7 +39,6 @@ import ( // TabletReshuffle tests if a vttablet can be pointed at an existing mysql func TestTabletReshuffle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -92,7 +91,6 @@ func TestTabletReshuffle(t *testing.T) { func
TestHealthCheck(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) @@ -200,7 +198,6 @@ func TestHealthCheck(t *testing.T) { // TestHealthCheckSchemaChangeSignal tests the tables and views, which report their schemas have changed in the output of a StreamHealth. func TestHealthCheckSchemaChangeSignal(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(keyspaceName) @@ -381,7 +378,6 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the query service won't be shutdown // Wait if tablet is not in service state - defer cluster.PanicHandler(t) clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) err := rdonlyTablet.VttabletProcess.WaitForTabletStatus("SERVING") diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index b3b11405abb..90397a737ef 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -29,7 +29,6 @@ import ( ) func TestFallbackSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -84,7 +83,6 @@ func assertAllowedURLTest(t *testing.T, url string) { } func TestDenyAllSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -116,7 +114,6 @@ func TestDenyAllSecurityPolicy(t *testing.T) { } func TestReadOnlySecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 1d8e897a4d2..2ded055230f 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -31,7 +31,6 @@ import ( // TestEnsureDB tests that vttablet creates the db as needed func TestEnsureDB(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -67,7 +66,6 @@ func TestEnsureDB(t *testing.T) { // TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended. 
func TestResetReplicationParameters(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index df727802648..226238a46c6 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -100,7 +100,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -268,7 +267,6 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result } func TestInitialThrottler(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validating OK response from disabled throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, tabletmanagerdatapb.CheckThrottlerResponseCode_OK) @@ -425,7 +423,6 @@ func TestInitialThrottler(t *testing.T) { } func TestThrottleViaApplySchema(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("throttling via ApplySchema", func(t *testing.T) { vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: "online"} _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput( @@ -468,7 +465,6 @@ func TestThrottleViaApplySchema(t *testing.T) { } func TestThrottlerAfterMetricsCollected(t *testing.T) { - defer cluster.PanicHandler(t) // By this time metrics will have been collected. We expect no lag, and something like: // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} @@ -497,7 +493,6 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) { } func TestLag(t *testing.T) { - defer cluster.PanicHandler(t) // Temporarily disable VTOrc recoveries because we want to // STOP replication specifically in order to increase the // lag and we DO NOT want VTOrc to try and fix this. @@ -636,7 +631,6 @@ func TestLag(t *testing.T) { } func TestNoReplicas(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) @@ -654,7 +648,6 @@ func TestNoReplicas(t *testing.T) { } func TestCustomQuery(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { req := &vtctldatapb.UpdateThrottlerConfigRequest{Enable: true, Threshold: customThreshold, CustomQuery: customQuery} @@ -722,7 +715,6 @@ func TestCustomQuery(t *testing.T) { } func TestRestoreDefaultQuery(t *testing.T) { - defer cluster.PanicHandler(t) // Validate going back from custom-query to default-query (replication lag) still works. t.Run("enabling throttler with default query and threshold", func(t *testing.T) { diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 26eb3918a0b..074bf875165 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -97,7 +97,6 @@ Topology: We create a keyspace with two shards, having 3 tablets each. Primaries belong to 'zone1' and replicas/rdonly belong to cell2.
*/ func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 082ecc5717f..f676af318cd 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -37,7 +37,6 @@ import ( 4. 'ListAllTablets' should return all the new tablets. */ func TestVtctldListAllTablets(t *testing.T) { - defer cluster.PanicHandler(t) url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testURL(t, url, "keyspace url") diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index 0f6fa6ce554..c6d48f44930 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -63,7 +63,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoRestart(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/etcd2/main_test.go b/go/test/endtoend/topotest/etcd2/main_test.go index 67b0dbbc8f7..ee2b542109b 100644 --- a/go/test/endtoend/topotest/etcd2/main_test.go +++ b/go/test/endtoend/topotest/etcd2/main_test.go @@ -64,7 +64,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/zk2/main_test.go b/go/test/endtoend/topotest/zk2/main_test.go index 48636331747..c6569519a3d 100644 --- a/go/test/endtoend/topotest/zk2/main_test.go +++ b/go/test/endtoend/topotest/zk2/main_test.go @@ -63,7 +63,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/transaction/restart/main_test.go b/go/test/endtoend/transaction/restart/main_test.go index 01185b5fa59..caa3111ad49 100644 --- a/go/test/endtoend/transaction/restart/main_test.go +++ b/go/test/endtoend/transaction/restart/main_test.go @@ -41,7 +41,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go b/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go index 2dff9f7b95f..c7bef098c05 100644 --- a/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go +++ b/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -87,7 +86,6 @@ func TestMain(m *testing.M) { } func TestTransactionRollBackWhenShutDown(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, 
err) @@ -122,7 +120,6 @@ func TestTransactionRollBackWhenShutDown(t *testing.T) { } func TestErrorInAutocommitSession(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/transaction/single/main_test.go b/go/test/endtoend/transaction/single/main_test.go index ec2dbd6378a..1eab3cea276 100644 --- a/go/test/endtoend/transaction/single/main_test.go +++ b/go/test/endtoend/transaction/single/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go index f63dbd1ae87..4d168fbdde0 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/main_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -121,7 +120,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) utils.ClearOutTable(t, vtParams, "twopc_fuzzer_insert") utils.ClearOutTable(t, vtParams, "twopc_fuzzer_update") diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go index 91c8e1ed444..6d09c174a4d 100644 --- a/go/test/endtoend/transaction/twopc/main_test.go +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -61,7 +61,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -139,7 +138,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) twopcutil.ClearOutTable(t, vtParams, "twopc_user") twopcutil.ClearOutTable(t, vtParams, "twopc_t1") twopcutil.ClearOutTable(t, vtParams, "twopc_lookup") @@ -164,7 +162,6 @@ func startWithMySQL(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go index b0a5dc4fb1a..61a43017ef9 100644 --- a/go/test/endtoend/transaction/twopc/metric/main_test.go +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -110,7 +109,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) twopcutil.ClearOutTable(t, vtParams, "twopc_user") twopcutil.ClearOutTable(t, vtParams, "twopc_t1") } diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go index ec2392f6043..4da4f86bdff 100644 --- a/go/test/endtoend/transaction/twopc/stress/main_test.go +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -123,7 +122,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) utils.ClearOutTable(t, vtParams, "twopc_t1") utils.ClearOutTable(t, vtParams, "twopc_settings") } diff --git a/go/test/endtoend/transaction/tx_test.go b/go/test/endtoend/transaction/tx_test.go index 0671d0d7136..89531952b13 100644 --- 
a/go/test/endtoend/transaction/tx_test.go +++ b/go/test/endtoend/transaction/tx_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -94,7 +93,6 @@ func TestMain(m *testing.M) { // TestTransactionModes tests transactions using twopc mode func TestTransactionModes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -140,7 +138,6 @@ func TestTransactionModes(t *testing.T) { // TestTransactionIsolation tests transaction isolation level. func TestTransactionIsolation(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -247,6 +244,5 @@ func start(t *testing.T) func() { return func() { deleteAll() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index 3b47e1f68dc..7d94c181abd 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -215,6 +215,18 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { return vtQr } +// ExecAssert is the same as Exec, but it only does assertions and won't FailNow +func (mcmp *MySQLCompare) ExecAssert(query string) *sqltypes.Result { + mcmp.t.Helper() + vtQr, err := mcmp.VtConn.ExecuteFetch(query, 1000, true) + assert.NoError(mcmp.t, err, "[Vitess Error] for query: "+query) + + mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) + assert.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{}) + return vtQr +} + // ExecNoCompare executes the query on vitess and mysql but does not compare the result with each other. func (mcmp *MySQLCompare) ExecNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { mcmp.t.Helper() diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index 41b74583f69..4d3d992e879 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/mysqlvsvitess/main_test.go b/go/test/endtoend/utils/mysqlvsvitess/main_test.go index 8f162fae41d..f064afb895d 100644 --- a/go/test/endtoend/utils/mysqlvsvitess/main_test.go +++ b/go/test/endtoend/utils/mysqlvsvitess/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index 35404981164..baa82821306 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/vtgate/engine" ) // AssertContains ensures the given query result contains the expected results. @@ -160,6 +161,19 @@ func Exec(t testing.TB, conn *mysql.Conn, query string) *sqltypes.Result { return qr }
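
Two helpers join the utils package here. ExecAssert above mirrors Exec but, per its own comment, only asserts and never calls FailNow, so a failing or mismatching query marks the test failed without aborting it mid-run. ExecTrace below wraps a query in vexplain trace and parses vtgate's response into an engine.PrimitiveDescription. A short usage sketch against the signatures added in this diff (the query is illustrative; t stands for the *testing.T, which the plan_tests suite obtains via mcmp.AsT()):

mcmp.ExecAssert("select id1, id2 from t1 order by id1") // compare vtgate vs MySQL, assert-only
pd := utils.ExecTrace(t, mcmp.VtConn, "select id1, id2 from t1 order by id1")
// pd is a PrimitiveDescription tree; tests can inspect it, e.g. via
// engine.WalkPrimitiveDescription, as the plan_tests suite later in this diff does.

+// ExecTrace executes the given query with trace using the given connection. The trace result is returned. +// The test fails if the query produces an error.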
+func ExecTrace(t testing.TB, conn *mysql.Conn, query string) engine.PrimitiveDescription { + t.Helper() + qr, err := conn.ExecuteFetch(fmt.Sprintf("vexplain trace %s", query), 10000, false) + require.NoError(t, err, "for query: "+query) + + // Extract the trace result and format it with indentation for pretty printing + pd, err := engine.PrimitiveDescriptionFromString(qr.Rows[0][0].ToString()) + require.NoError(t, err) + return pd +} + // ExecMulti executes the given (potential multi) queries using the given connection. // The test fails if any of the queries produces an error func ExecMulti(t testing.TB, conn *mysql.Conn, query string) error { diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index f8e19c07a0c..aab68159ca3 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -99,7 +99,6 @@ var ( ) func TestVaultAuth(t *testing.T) { - defer cluster.PanicHandler(nil) // Instantiate Vitess Cluster objects and start topo initializeClusterEarly(t) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 181b5dfc9ad..48e552c3a7c 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -72,7 +72,6 @@ var ( // TestMain is the main entry point func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -131,12 +130,10 @@ func TestMain(m *testing.M) { } func TestShards(t *testing.T) { - defer cluster.PanicHandler(t) assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards)) } func TestDeploySchema(t *testing.T) { - defer cluster.PanicHandler(t) if clusterInstance.ReusingVTDATAROOT { // we assume data is already deployed @@ -163,7 +160,6 @@ func TestDeploySchema(t *testing.T) { } func TestTablesExist(t *testing.T) { - defer cluster.PanicHandler(t) checkTables(t, "", totalTableCount) } diff --git a/go/test/endtoend/vtadmin/main_test.go b/go/test/endtoend/vtadmin/main_test.go index fd4ecbdb7fe..9233cd5b0aa 100644 --- a/go/test/endtoend/vtadmin/main_test.go +++ b/go/test/endtoend/vtadmin/main_test.go @@ -51,7 +51,6 @@ create table u_b ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -92,7 +91,6 @@ func TestMain(m *testing.M) { // TestVtadminAPIs tests the vtadmin APIs. 
func TestVtadminAPIs(t *testing.T) { - defer cluster.PanicHandler(t) // Test the vtadmin APIs t.Run("keyspaces api", func(t *testing.T) { diff --git a/go/test/endtoend/vtgate/concurrentdml/main_test.go b/go/test/endtoend/vtgate/concurrentdml/main_test.go index 6ee5619b742..734962b0d33 100644 --- a/go/test/endtoend/vtgate/concurrentdml/main_test.go +++ b/go/test/endtoend/vtgate/concurrentdml/main_test.go @@ -66,7 +66,6 @@ INSERT INTO t1_seq (id, next_id, cache) values(0, 1, 1000); ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -108,7 +107,6 @@ func TestMain(m *testing.M) { } func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -137,7 +135,6 @@ func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { func TestOpenTxBlocksInSerial(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -169,7 +166,6 @@ func TestOpenTxBlocksInSerial(t *testing.T) { func TestOpenTxBlocksInConcurrent(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -207,7 +203,6 @@ func TestOpenTxBlocksInConcurrent(t *testing.T) { } func TestUpdateLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/connectiondrain/main_test.go b/go/test/endtoend/vtgate/connectiondrain/main_test.go index 6dae9b72be9..6257baf8e40 100644 --- a/go/test/endtoend/vtgate/connectiondrain/main_test.go +++ b/go/test/endtoend/vtgate/connectiondrain/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() os.Exit(m.Run()) } @@ -87,7 +86,6 @@ func start(t *testing.T, vtParams mysql.ConnParams) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/consolidator/main_test.go b/go/test/endtoend/vtgate/consolidator/main_test.go index 021db7e513e..0d5eae3eca9 100644 --- a/go/test/endtoend/vtgate/consolidator/main_test.go +++ b/go/test/endtoend/vtgate/consolidator/main_test.go @@ -65,7 +65,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index 5bfec3890b5..e2925bf928d 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -81,7 +80,6 @@ func TestMain(m *testing.M) { } func TestDBDDLPlugin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 71f4a2353f7..374a92f395f 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ 
b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index b4d610785b5..fe418c1c0ea 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -98,7 +98,6 @@ type fkReference struct { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -197,7 +196,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 34114152e4e..42498ad80f0 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -335,7 +335,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -653,7 +652,6 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { } func TestStressFK(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validate replication health", func(t *testing.T) { validateReplicationIsHealthy(t, replicaNoFK) diff --git a/go/test/endtoend/vtgate/gen4/column_name_test.go b/go/test/endtoend/vtgate/gen4/column_name_test.go index d23c03c9f6b..0f5a83a5092 100644 --- a/go/test/endtoend/vtgate/gen4/column_name_test.go +++ b/go/test/endtoend/vtgate/gen4/column_name_test.go @@ -26,11 +26,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestColumnNames(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 4c94e8e2ec8..e8280b3aa06 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -152,6 +151,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index fc4983935e9..d01d4d972a1 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -28,11 +28,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestDbNameOverride(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -55,7 +53,6 @@ func TestDbNameOverride(t *testing.T) { } func TestInformationSchemaQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -90,7 +87,6 @@ func assertSingleRowIsReturned(t *testing.T, conn *mysql.Conn, predicate string, } func TestInformationSchemaWithSubquery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, 
&vtParams) require.NoError(t, err) @@ -101,7 +97,6 @@ func TestInformationSchemaWithSubquery(t *testing.T) { } func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -113,7 +108,6 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T } func TestFKConstraintUsingInformationSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -131,7 +125,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { connParams := vtParams @@ -144,7 +137,6 @@ func TestConnectWithSystemSchema(t *testing.T) { } func TestUseSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -156,7 +148,6 @@ func TestUseSystemSchema(t *testing.T) { } func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -191,7 +182,6 @@ func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { } func TestMultipleSchemaPredicates(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -217,7 +207,6 @@ func TestMultipleSchemaPredicates(t *testing.T) { } func TestQuerySystemTables(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 587c189d2ea..91605394cf1 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -86,7 +86,6 @@ create table my_message( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -125,7 +124,6 @@ func TestMain(m *testing.M) { } func TestStreamMessaging(t *testing.T) { - defer cluster.PanicHandler(t) cnf := vitessdriver.Configuration{ Protocol: "grpc", diff --git a/go/test/endtoend/vtgate/grpc_api/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go index 3c8605f79a0..87d30f4ce26 100644 --- a/go/test/endtoend/vtgate/grpc_api/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -75,7 +75,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index 4971d03060b..1eb31663577 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -117,7 +117,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) } func TestRoutingWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := createCluster(nil) defer clusterInstance.Teardown() @@ -141,7 +140,6 @@ func TestRoutingWithKeyspacesToWatch(t *testing.T) { } func 
TestVSchemaDDLWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) extraVTGateArgs := []string{ "--vschema_ddl_authorized_users", "%", diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index b276508f269..6c43a70632b 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -54,7 +54,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { clusterInstance = cluster.NewCluster(Cell, "localhost") @@ -122,6 +121,5 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index f3804a2a45f..bbcb338fa50 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -783,7 +782,6 @@ func TestJoinWithMergedRouteWithPredicate(t *testing.T) { func TestRowCountExceed(t *testing.T) { conn, _ := start(t) defer func() { - cluster.PanicHandler(t) // needs special delete logic as it exceeds row count. for i := 50; i <= 300; i += 50 { utils.Exec(t, conn, fmt.Sprintf("delete from t1 where id1 < %d", i)) diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go index 4f5897d1f59..b970fb66b12 100644 --- a/go/test/endtoend/vtgate/mysql80/main_test.go +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -35,7 +35,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index b29eb13ecdc..5132bf87aba 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -23,15 +23,12 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" ) func TestFunctionInDefault(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -223,7 +220,6 @@ func BenchmarkReservedConnWhenSettingSysVar(b *testing.B) { } func TestJsonFunctions(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go index 9e39e7b5dd5..d5b6a639c68 100644 --- a/go/test/endtoend/vtgate/partialfailure/main_test.go +++ b/go/test/endtoend/vtgate/partialfailure/main_test.go @@ -45,7 +45,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/plan_tests/main_test.go b/go/test/endtoend/vtgate/plan_tests/main_test.go new file mode 100644 index 00000000000..d3915af0c8d --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/main_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plan_tests + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + uks = "main" + sks = "user" + cell = "plantests" +) + +func TestMain(m *testing.M) { + vschema := readFile("vschemas/schema.json") + userVs := extractUserKS(vschema) + mainVs := extractMainKS(vschema) + sSQL := readFile("schemas/user.sql") + uSQL := readFile("schemas/main.sql") + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start unsharded keyspace + uKeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uSQL, + VSchema: mainVs, + } + err = clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start sharded keyspace + skeyspace := &cluster.Keyspace{ + Name: sks, + SchemaSQL: sSQL, + VSchema: userVs, + } + err = clusterInstance.StartKeyspace(*skeyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // TODO: (@GuptaManan100/@systay): Also run the tests with normalizer on. + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--normalize_queries=false", + "--schema_change_signal=false", + ) + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + vtParams = clusterInstance.GetVTParams(sks) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, sks, sSQL, uSQL) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitCode) +} + +func readFile(filename string) string { + schema, err := os.ReadFile(locateFile(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + return string(schema) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + return mcmp, func() { + mcmp.Close() + } +} + +func readJSONTests(filename string) []planbuilder.PlanTest { + var output []planbuilder.PlanTest + file, err := os.Open(locateFile(filename)) + if err != nil { + panic(err) + } + defer file.Close() + dec := json.NewDecoder(file) + err = dec.Decode(&output) + if err != nil { + panic(err) + } + return output +} + +func locateFile(name string) string { + return "../../../../vt/vtgate/planbuilder/testdata/" + name +} + +// verifyTestExpectations verifies the expectations of the test. +func verifyTestExpectations(t *testing.T, pd engine.PrimitiveDescription, test planbuilder.PlanTest) { + // 1. 
Verify that the Join primitive sees at least 1 row on the left side. + engine.WalkPrimitiveDescription(pd, func(description engine.PrimitiveDescription) { + if description.OperatorType == "Join" { + require.NotZero(t, description.Inputs[0].RowsReceived[0]) + } + }) + + // 2. Verify that the plan description matches the expected plan description. + planBytes, err := test.Plan.MarshalJSON() + require.NoError(t, err) + mp := make(map[string]any) + err = json.Unmarshal(planBytes, &mp) + require.NoError(t, err) + pdExpected, err := engine.PrimitiveDescriptionFromMap(mp["Instructions"].(map[string]any)) + require.NoError(t, err) + require.Empty(t, pdExpected.Equals(pd), "Expected: %v\nGot: %v", string(planBytes), pd) +} + +func extractUserKS(jsonString string) string { + var result map[string]any + if err := json.Unmarshal([]byte(jsonString), &result); err != nil { + panic(err.Error()) + } + + keyspaces, ok := result["keyspaces"].(map[string]any) + if !ok { + panic("Keyspaces not found") + } + + user, ok := keyspaces["user"].(map[string]any) + if !ok { + panic("User keyspace not found") + } + + tables, ok := user["tables"].(map[string]any) + if !ok { + panic("Tables not found") + } + + userTbl, ok := tables["user"].(map[string]any) + if !ok { + panic("User table not found") + } + + delete(userTbl, "auto_increment") // TODO: we should have an unsharded keyspace where this could live + + // Marshal the inner part back to JSON string + userJson, err := json.Marshal(user) + if err != nil { + panic(err.Error()) + } + + return string(userJson) +} + +func extractMainKS(jsonString string) string { + var result map[string]any + if err := json.Unmarshal([]byte(jsonString), &result); err != nil { + panic(err.Error()) + } + + keyspaces, ok := result["keyspaces"].(map[string]any) + if !ok { + panic("Keyspaces not found") + } + + main, ok := keyspaces["main"].(map[string]any) + if !ok { + panic("main keyspace not found") + } + + // Marshal the inner part back to JSON string + mainJson, err := json.Marshal(main) + if err != nil { + panic(err.Error()) + } + + return string(mainJson) +} diff --git a/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go new file mode 100644 index 00000000000..1594e9b392c --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
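The plan_tests package above drives end-to-end execution from the planner's JSON testdata. As a hedged sketch of the shape those files appear to have — field names inferred from how planbuilder.PlanTest is used in this diff (Comment, Query, SkipE2E, Plan); the authoritative definition lives in go/vt/vtgate/planbuilder — decoding one file looks roughly like this:

```go
package plan_tests

import (
	"encoding/json"
	"os"
)

// planTest mirrors the fields of planbuilder.PlanTest that this diff relies
// on; the JSON tags are assumptions for illustration, not the canonical ones.
type planTest struct {
	Comment string          `json:"comment"`
	Query   string          `json:"query"`
	SkipE2E bool            `json:"skip_e2e"`
	Plan    json.RawMessage `json:"plan"`
}

// readTests decodes a testdata file such as select_cases.json into a slice
// of cases, the same way readJSONTests does in main_test.go above.
func readTests(path string) ([]planTest, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var tests []planTest
	if err := json.NewDecoder(f).Decode(&tests); err != nil {
		return nil, err
	}
	return tests, nil
}
```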
+*/ + +package plan_tests + +import ( + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" +) + +func TestSelectCases(t *testing.T) { + mcmp, closer := start(t) + defer closer() + tests := readJSONTests("select_cases.json") + for _, test := range tests { + mcmp.Run(test.Comment, func(mcmp *utils.MySQLCompare) { + if test.SkipE2E { + mcmp.AsT().Skip(test.Query) + } + mcmp.Exec(test.Query) + pd := utils.ExecTrace(mcmp.AsT(), mcmp.VtConn, test.Query) + verifyTestExpectations(mcmp.AsT(), pd, test) + if mcmp.VtConn.IsClosed() { + mcmp.AsT().Fatal("vtgate connection is closed") + } + }) + } +} diff --git a/go/test/endtoend/vtgate/prefixfanout/main_test.go b/go/test/endtoend/vtgate/prefixfanout/main_test.go index 928808fd48e..a96ee4ce7f5 100644 --- a/go/test/endtoend/vtgate/prefixfanout/main_test.go +++ b/go/test/endtoend/vtgate/prefixfanout/main_test.go @@ -109,7 +109,6 @@ PRIMARY KEY (c1) ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -158,7 +157,6 @@ func TestMain(m *testing.M) { } func TestCFCPrefixQueryNoHash(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKs) conn, err := mysql.Connect(ctx, &vtParams) @@ -196,7 +194,6 @@ func TestCFCPrefixQueryNoHash(t *testing.T) { } func TestCFCPrefixQueryWithHash(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKsMD5) @@ -239,7 +236,6 @@ func TestCFCPrefixQueryWithHash(t *testing.T) { } func TestCFCInsert(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKs) diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index d206f58e17c..62d23749cd7 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -66,7 +65,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go index 02013a9b0e2..bd1c1aa3b7d 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/benchmark/main_test.go b/go/test/endtoend/vtgate/queries/benchmark/main_test.go index 40a215c8007..0410f52993a 100644 --- a/go/test/endtoend/vtgate/queries/benchmark/main_test.go +++ b/go/test/endtoend/vtgate/queries/benchmark/main_test.go @@ -65,7 +65,6 @@ var shards4 = []string{ } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -169,6 +168,5 @@ func start(b *testing.B) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(b) } } diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index cb106564b2f..fe467f31c20 100644 --- 
a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/derived/main_test.go b/go/test/endtoend/vtgate/queries/derived/main_test.go index 3b44811f95c..0bab24a966a 100644 --- a/go/test/endtoend/vtgate/queries/derived/main_test.go +++ b/go/test/endtoend/vtgate/queries/derived/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go index 0c4d58aa614..bc72acc1159 100644 --- a/go/test/endtoend/vtgate/queries/dml/main_test.go +++ b/go/test/endtoend/vtgate/queries/dml/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -148,6 +147,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go index f52e2eff532..e50e9210c55 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go @@ -21,12 +21,10 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestFoundRows(t *testing.T) { - defer cluster.PanicHandler(t) mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) require.NoError(t, err) defer mcmp.Close() diff --git a/go/test/endtoend/vtgate/queries/foundrows/main_test.go b/go/test/endtoend/vtgate/queries/foundrows/main_test.go index 8f992863008..248b6cd9434 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/main_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index c5568b2db49..e158bd96e33 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } @@ -114,7 +112,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { vtConnParams := vtParams vtConnParams.DbName = dbname diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go 
b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 3696617281e..76d5f44ebae 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -52,7 +52,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go index 99608030246..61ddec43589 100644 --- a/go/test/endtoend/vtgate/queries/kill/main_test.go +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index a587f124762..818b834511a 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,7 +118,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index f20072031a8..ee9be542634 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index fd869c4ba5b..8738baf3267 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -30,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -50,7 +49,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go index c4b0974c24b..a1478dcd2ac 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go index 7bf702afc15..b302a0f4dc7 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/normalize/main_test.go b/go/test/endtoend/vtgate/queries/normalize/main_test.go index 8f4d97209dd..8c75d38284d 
100644 --- a/go/test/endtoend/vtgate/queries/normalize/main_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/main_test.go @@ -39,7 +39,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/main_test.go b/go/test/endtoend/vtgate/queries/orderby/main_test.go index 9f18377ee3f..353745722b7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index c36b52a4e6a..716f01fb5c7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go index 00221e9c9f3..373c1327074 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go index a20c7ad54c6..956815d2a0d 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/random/main_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go index e3256f60796..85c8840924d 100644 --- a/go/test/endtoend/vtgate/queries/random/main_test.go +++ b/go/test/endtoend/vtgate/queries/random/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go index 2d210ee7f99..20c7934d91f 100644 --- a/go/test/endtoend/vtgate/queries/random/random_test.go +++ b/go/test/endtoend/vtgate/queries/random/random_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -61,7 +60,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git 
a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go index c350038bf6e..03ee429e4c0 100644 --- a/go/test/endtoend/vtgate/queries/reference/main_test.go +++ b/go/test/endtoend/vtgate/queries/reference/main_test.go @@ -53,7 +53,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go index 66d46dfaf15..ce942833729 100644 --- a/go/test/endtoend/vtgate/queries/reference/reference_test.go +++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go @@ -24,8 +24,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (*mysql.Conn, func()) { @@ -35,7 +33,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/subquery/main_test.go b/go/test/endtoend/vtgate/queries/subquery/main_test.go index 9eaf3b4caa0..bc8580bd38d 100644 --- a/go/test/endtoend/vtgate/queries/subquery/main_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go index 4298bbe80fc..135b86195a5 100644 --- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go index 06e8a786469..81fcfb26095 100644 --- a/go/test/endtoend/vtgate/queries/timeout/main_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go index 565c3c07a4f..d1d718add25 100644 --- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -45,7 +44,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/tpch/main_test.go b/go/test/endtoend/vtgate/queries/tpch/main_test.go index 103adb336ab..403ddd510ce 100644 --- a/go/test/endtoend/vtgate/queries/tpch/main_test.go +++ b/go/test/endtoend/vtgate/queries/tpch/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go 
b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go index c4bf71cafa1..efa322a5e1c 100644 --- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go +++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/union/main_test.go b/go/test/endtoend/vtgate/queries/union/main_test.go index 06ec07a6c2f..a5f45f84156 100644 --- a/go/test/endtoend/vtgate/queries/union/main_test.go +++ b/go/test/endtoend/vtgate/queries/union/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/union/union_test.go b/go/test/endtoend/vtgate/queries/union/union_test.go index 03f98950f44..26371af3c87 100644 --- a/go/test/endtoend/vtgate/queries/union/union_test.go +++ b/go/test/endtoend/vtgate/queries/union/union_test.go @@ -19,7 +19,6 @@ package union import ( "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/vexplain/main_test.go b/go/test/endtoend/vtgate/queries/vexplain/main_test.go index c1c401bc573..96b6a1c41d1 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/main_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go index 45baf7af903..1a8ec2b4c37 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" @@ -49,7 +48,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 0549a9b06b0..ce6db45d24e 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -100,7 +100,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -143,7 +142,6 @@ func TestMain(m *testing.M) { } func TestRAWSettings(t *testing.T) { - defer cluster.PanicHandler(t) conn, err := mysql.Connect(context.Background(), &vtParams) require.NoError(t, err) defer conn.Close() diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index 8c0278604f7..00f569d9eb9 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -101,7 +101,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - 
defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 9a4d7c50dbd..3280d64d433 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -63,7 +63,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index 915d76051a4..81bb6b90ee5 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 20d255941db..decf2f0dfdd 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index d4a61665a6d..320cbc87172 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index e7e0cfb0259..8bda7dea121 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -29,11 +29,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetSysVarSingle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { name, expr string diff --git a/go/test/endtoend/vtgate/reservedconn/udv_test.go b/go/test/endtoend/vtgate/reservedconn/udv_test.go index 55f4c54c612..14b65dbcd35 100644 --- a/go/test/endtoend/vtgate/reservedconn/udv_test.go +++ b/go/test/endtoend/vtgate/reservedconn/udv_test.go @@ -31,11 +31,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetUDV(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { @@ -123,7 +121,6 @@ func TestSetUDV(t *testing.T) { } func TestMysqlDumpInitialLog(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 6b2e8ef7e61..4c28e29ca0d 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -55,7 +55,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -101,7 +100,6 @@ func TestMain(m 
*testing.M) { } func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) testWithInitialSchema(t) testWithAlterSchema(t) testWithAlterDatabase(t) diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go index 9586206221e..ec201487887 100644 --- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -57,7 +57,6 @@ var ( ) func TestLoadKeyspaceWithNoTablet(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) @@ -100,7 +99,6 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { } func TestNoInitialKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index 3bb4f6dfd9f..1943fefa9d7 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestVSchemaTrackerInit(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -137,7 +135,6 @@ func TestVSchemaTrackerInit(t *testing.T) { // properly handles primary tablet restarts -- meaning that we maintain // the exact same vschema state as before the restart. 
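Most hunks in this stretch are one mechanical edit repeated: deleting a deferred cluster.PanicHandler call from TestMain functions and from test-cleanup closures. A hedged sketch of what such a helper typically looks like (the exact Vitess implementation may differ) shows why the deletion loses nothing — the Go test runner already converts an uncaught panic into a failed run with a full stack trace:

```go
package cluster

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// PanicHandler, as sketched here, is deferred at the top of a test: it
// recovers an in-flight panic and turns it into a fatal test failure. Since
// `go test` already aborts and reports uncaught panics, recovering only to
// fail the test again is redundant, which is why these calls can be dropped
// wholesale.
func PanicHandler(t testing.TB) {
	err := recover()
	if t == nil {
		return
	}
	require.Nilf(t, err, "panic occurred in test %v", t.Name())
}
```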
func TestVSchemaTrackerKeyspaceReInit(t *testing.T) { - defer cluster.PanicHandler(t) primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 50042f3142a..5f82bd5d71a 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -161,7 +160,6 @@ func TestNewTable(t *testing.T) { } func TestAmbiguousColumnJoin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 09bd97eb9fe..6fbd3f3d33c 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -122,7 +122,6 @@ create table t8( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -208,7 +207,6 @@ func TestMain(m *testing.M) { } func TestAddColumn(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 5ecf89a5db7..4256727915d 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -112,7 +111,6 @@ func TestMain(m *testing.M) { } func TestNewUnshardedTable(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() @@ -182,7 +180,6 @@ func TestNewUnshardedTable(t *testing.T) { // creating two tables having the same name differing only in casing, but other operating systems don't. // More information at https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html#:~:text=Table%20names%20are%20stored%20in,lowercase%20on%20storage%20and%20lookup. 
func TestCaseSensitiveSchemaTracking(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() diff --git a/go/test/endtoend/vtgate/sec_vind/main_test.go b/go/test/endtoend/vtgate/sec_vind/main_test.go index 7aa5df76a83..7ec0d5c0682 100644 --- a/go/test/endtoend/vtgate/sec_vind/main_test.go +++ b/go/test/endtoend/vtgate/sec_vind/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -101,7 +100,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index 1bda37094b2..0fc1c810eb3 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -174,7 +174,6 @@ CREATE TABLE allDefaults ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -216,7 +215,6 @@ func TestMain(m *testing.M) { } func TestSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -274,7 +272,6 @@ func TestSeq(t *testing.T) { } func TestDotTableSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -297,7 +294,6 @@ func TestDotTableSeq(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index d6357ce8f2a..77e9a58cf69 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -89,7 +89,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 50529d9fdf9..3457a2cab3c 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -86,7 +86,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index e1818735ed1..307bb7fcf23 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -147,7 +147,6 @@ END; ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -192,7 +191,6 @@ func TestMain(m *testing.M) { func TestSelectIntoAndLoadFrom(t *testing.T) { // Test is skipped because it requires secure-file-priv variable to be set to not NULL or empty. 
t.Skip() - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -227,7 +225,6 @@ func TestSelectIntoAndLoadFrom(t *testing.T) { } func TestEmptyStatement(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -244,7 +241,6 @@ func TestEmptyStatement(t *testing.T) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -264,7 +260,6 @@ func TestTopoDownServingQuery(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -279,7 +274,6 @@ func TestInsertAllDefaults(t *testing.T) { } func TestDDLUnsharded(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -300,7 +294,6 @@ func TestDDLUnsharded(t *testing.T) { } func TestCallProcedure(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -347,7 +340,6 @@ func TestCallProcedure(t *testing.T) { } func TestTempTable(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -372,7 +364,6 @@ func TestTempTable(t *testing.T) { } func TestReservedConnDML(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -395,7 +386,6 @@ func TestReservedConnDML(t *testing.T) { } func TestNumericPrecisionScale(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go index 3251668e155..84c2c825784 100644 --- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go +++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go @@ -265,7 +265,6 @@ CREATE TABLE thex ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -304,7 +303,6 @@ func TestMain(m *testing.M) { } func TestVindexHexTypes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -326,7 +324,6 @@ func TestVindexHexTypes(t *testing.T) { } func TestVindexBindVarOverlap(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go index eec54f8f47f..5cd01449b71 100644 --- a/go/test/endtoend/vtgate/vschema/vschema_test.go +++ b/go/test/endtoend/vtgate/vschema/vschema_test.go @@ -18,21 +18,25 @@ package vschema import ( "context" + "encoding/json" "flag" "fmt" "os" + "path" "testing" + "time" - "vitess.io/vitess/go/test/endtoend/utils" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) var ( clusterInstance *cluster.LocalProcessCluster + configFile string vtParams mysql.ConnParams hostname = "localhost" keyspaceName = "ks" @@ -53,7 +57,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() 
exitcode, err := func() (int, error) { @@ -66,7 +69,21 @@ func TestMain(m *testing.M) { } // List of users authorized to execute vschema ddl operations - clusterInstance.VtGateExtraArgs = []string{"--vschema_ddl_authorized_users=%", "--schema_change_signal=false"} + if utils.BinaryIsAtLeastAtVersion(22, "vtgate") { + timeNow := time.Now().Unix() + configFile = path.Join(os.TempDir(), fmt.Sprintf("vtgate-config-%d.json", timeNow)) + err := writeConfig(configFile, map[string]string{ + "vschema_ddl_authorized_users": "%", + }) + if err != nil { + return 1, err + } + defer os.Remove(configFile) + + clusterInstance.VtGateExtraArgs = []string{fmt.Sprintf("--config-file=%s", configFile), "--schema_change_signal=false"} + } else { + clusterInstance.VtGateExtraArgs = []string{"--vschema_ddl_authorized_users=%", "--schema_change_signal=false"} + } // Start keyspace keyspace := &cluster.Keyspace{ @@ -96,8 +113,16 @@ } +func writeConfig(path string, cfg map[string]string) error { + file, err := os.Create(path) + if err != nil { + return err + } + defer file.Close() + return json.NewEncoder(file).Encode(cfg) +} + func TestVSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -138,4 +163,15 @@ func TestVSchema(t *testing.T) { utils.AssertMatches(t, conn, "delete from vt_user", `[]`) + if utils.BinaryIsAtLeastAtVersion(22, "vtgate") { + writeConfig(configFile, map[string]string{ + "vschema_ddl_authorized_users": "", + }) + + require.EventuallyWithT(t, func(t *assert.CollectT) { + _, err = conn.ExecuteFetch("ALTER VSCHEMA DROP TABLE main", 1000, false) + assert.Error(t, err) + assert.ErrorContains(t, err, "is not authorized to perform vschema operations") + }, 5*time.Second, 100*time.Millisecond) + } } diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 638ea5fa72e..3fe43fa8f8f 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -33,7 +33,6 @@ import ( // TestAPIEndpoints tests the various API endpoints that VTOrc offers. func TestAPIEndpoints(t *testing.T) { - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") diff --git a/go/test/endtoend/vtorc/api/config_test.go b/go/test/endtoend/vtorc/api/config_test.go index 71cc6291be7..821b0f8071e 100644 --- a/go/test/endtoend/vtorc/api/config_test.go +++ b/go/test/endtoend/vtorc/api/config_test.go @@ -30,7 +30,6 @@ import ( // TestDynamicConfigs tests the dynamic configurations that VTOrc offers.
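The version-gated branch above moves vschema_ddl_authorized_users out of a static flag and into a JSON --config-file, precisely so TestVSchema can revoke authorization at runtime and watch vtgate pick up the change. The waiting primitive is testify's EventuallyWithT: it re-runs the callback every tick and passes on the first run in which no assertion recorded on the CollectT fails. A minimal self-contained sketch of those semantics, with the config reload simulated by a timer:

```go
package vschema

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestWaitForReload illustrates the polling pattern used in TestVSchema. The
// callback runs every 50ms; once the fake "reload" deadline passes, a run
// records no failed assertions and the wait succeeds. If 5s elapse first,
// the test fails with the collected assertion errors.
func TestWaitForReload(t *testing.T) {
	reloaded := time.Now().Add(300 * time.Millisecond) // stand-in for vtgate applying the rewritten config file
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, time.Now().After(reloaded), "config reload not observed yet")
	}, 5*time.Second, 50*time.Millisecond)
}
```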
func TestDynamicConfigs(t *testing.T) { - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 1, "") vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] diff --git a/go/test/endtoend/vtorc/api/main_test.go b/go/test/endtoend/vtorc/api/main_test.go index f89326bc856..cc3e796b293 100644 --- a/go/test/endtoend/vtorc/api/main_test.go +++ b/go/test/endtoend/vtorc/api/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/main_test.go b/go/test/endtoend/vtorc/general/main_test.go index 6db0792de3a..0cd88cd378c 100644 --- a/go/test/endtoend/vtorc/general/main_test.go +++ b/go/test/endtoend/vtorc/general/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -47,8 +46,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 9cc931897bf..a4ed71945be 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -40,7 +40,6 @@ import ( // verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call func TestPrimaryElection(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 2, "") @@ -66,7 +65,6 @@ func TestPrimaryElection(t *testing.T) { // if it has an errant GTID. func TestErrantGTIDOnPreviousPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{}, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -126,7 +124,6 @@ func TestErrantGTIDOnPreviousPrimary(t *testing.T) { // verify replication is setup func TestSingleKeyspace(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -145,7 +142,6 @@ func TestSingleKeyspace(t *testing.T) { // verify replication is setup func TestKeyspaceShard(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -167,7 +163,6 @@ func TestKeyspaceShard(t *testing.T) { // 6. disable recoveries and make sure the detected problems are set correctly. 
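With the PanicHandler lines gone, the TestMain functions touched in these files all reduce to the same idiom, condensed below from the bodies shown in this diff: do the setup inside a closure and hand its return value to os.Exit, because os.Exit terminates the process without running deferred functions, so the cluster teardown has to fire when the closure returns instead. The cell name here is illustrative.

```go
package example

import (
	"flag"
	"fmt"
	"os"
	"testing"

	"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestMain(m *testing.M) {
	flag.Parse()
	exitCode := func() int {
		clusterInstance := cluster.NewCluster("zone1", "localhost")
		// Runs when the closure returns, i.e. before os.Exit below; a defer
		// in TestMain's own frame would be skipped entirely.
		defer clusterInstance.Teardown()
		if err := clusterInstance.StartTopo(); err != nil {
			fmt.Println(err.Error())
			return 1
		}
		return m.Run()
	}()
	os.Exit(exitCode)
}
```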
func TestVTOrcRepairs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -346,7 +341,6 @@ func TestRepairAfterTER(t *testing.T) { // test fails intermittently on CI, skip until it can be fixed. t.SkipNow() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -480,7 +474,6 @@ func TestSemiSync(t *testing.T) { // TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld func TestVTOrcWithPrs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -530,7 +523,6 @@ func TestVTOrcWithPrs(t *testing.T) { // TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies func TestMultipleDurabilities(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "") // Setup a semi-sync cluster @@ -551,7 +543,6 @@ func TestMultipleDurabilities(t *testing.T) { // TestDrainedTablet tests that we don't forget drained tablets and they still show up in the vtorc output. func TestDrainedTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{}, 1, "") @@ -639,7 +630,6 @@ func TestDurabilityPolicySetLater(t *testing.T) { func TestFullStatusConnectionPooling(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, []string{ "--tablet_manager_grpc_concurrency=1", }, cluster.VTOrcConfiguration{ diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index a3e50bd0cc9..cd03df01bd6 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index a46e3789730..9017d35a8c5 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -39,7 +39,6 @@ import ( // Also tests that VTOrc can handle multiple failures, if the durability policies allow it func TestDownPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) 
- defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -116,7 +115,6 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -172,7 +170,6 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -239,7 +236,6 @@ func TestDeletedPrimaryTablet(t *testing.T) { // that primary is unreachable. This helps us save a few seconds, depending on the value of the `RemoteOperationTimeout` flag. func TestDeadPrimaryRecoversImmediately(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test.
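Note that every recovery test here keeps its deferred utils.PrintVTOrcLogsOnFailure even as the panic handler disappears. A plausible shape for such a helper — hypothetical; the real one lives in go/test/endtoend/vtorc/utils — is to check t.Failed() while the stack unwinds and dump the daemon logs only then, so passing runs stay quiet:

```go
package utils

import (
	"os"
	"testing"
)

// printLogsOnFailure sketches a log-on-failure helper: deferred at the top
// of a test, it inspects t.Failed() during unwinding and, only if the test
// failed, copies the given log file into the test output.
func printLogsOnFailure(t *testing.T, logPath string) {
	if !t.Failed() {
		return
	}
	data, err := os.ReadFile(logPath)
	if err != nil {
		t.Logf("could not read %s: %v", logPath, err)
		return
	}
	t.Logf("=== %s ===\n%s", logPath, data)
}
```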
@@ -322,7 +318,6 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // covers part of the test case master-failover-lost-replicas from orchestrator func TestCrossDataCenterFailure(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -368,7 +363,6 @@ func TestCrossDataCenterFailure(t *testing.T) { // In case of no viable candidates, we should error out func TestCrossDataCenterFailureError(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -415,7 +409,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // were detected by vtorc and could be configured to have their sources detached t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -495,7 +488,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator func TestPromotionLagSuccess(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 59", FailPrimaryPromotionOnLagMinutes: 1, @@ -545,7 +537,6 @@ func TestPromotionLagFailure(t *testing.T) { // was smaller than the configured value, otherwise it would fail the promotion t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 61", FailPrimaryPromotionOnLagMinutes: 1, @@ -598,7 +589,6 @@ func TestPromotionLagFailure(t *testing.T) { // That is the replica which should be promoted in case of primary failure func TestDownPrimaryPromotionRule(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -646,7 +636,6 @@ func TestDownPrimaryPromotionRule(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -726,7 +715,6 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, PreventCrossCellFailover: true, diff --git a/go/viperutil/internal/sync/sync.go b/go/viperutil/internal/sync/sync.go index 6bee1a14e72..f69829f734d 100644 --- a/go/viperutil/internal/sync/sync.go +++ 
b/go/viperutil/internal/sync/sync.go @@ -86,7 +86,7 @@ func (v *Viper) Set(key string, value any) { v.m.Lock() defer v.m.Unlock() - // We must not update v.disk here; explicit calls to Set will supercede all + // We must not update v.disk here; explicit calls to Set will supersede all // future config reloads. v.live.Set(key, value) diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go index 3ba6b187a2f..dbd146495e8 100644 --- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go +++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go @@ -32,8 +32,9 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/viperutil" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" @@ -203,9 +204,9 @@ type AZBlobBackupHandle struct { name string readOnly bool waitGroup sync.WaitGroup - errors concurrency.AllErrorRecorder ctx context.Context cancel context.CancelFunc + errors.PerFileErrorRecorder } // Directory implements BackupHandle. @@ -218,21 +219,6 @@ func (bh *AZBlobBackupHandle) Name() string { return bh.name } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) Error() error { - return bh.errors.Error() -} - // AddFile implements BackupHandle. func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { if bh.readOnly { @@ -263,7 +249,7 @@ func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, file }) if err != nil { reader.CloseWithError(err) - bh.RecordError(err) + bh.RecordError(filename, err) } }() diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index fb3d0e2d125..eeb14039d01 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -272,7 +272,7 @@ func getBackupManifestInto(ctx context.Context, backup backupstorage.BackupHandl if err := json.NewDecoder(file).Decode(outManifest); err != nil { return vterrors.Wrap(err, "can't decode MANIFEST") } - return nil + return backup.Error() } // IncrementalBackupDetails lists some incremental backup specific information diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index 92bc71d63aa..4fd37b3163a 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -25,7 +25,8 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/vt/servenv" ) @@ -89,9 +90,9 @@ type BackupHandle interface { // ReadCloser is closed. ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) - // concurrency.ErrorRecorder is embedded here to coordinate reporting and - // handling of errors among all the components involved in taking a backup. - concurrency.ErrorRecorder + // BackupErrorRecorder is embedded here to coordinate reporting and + // handling of errors among all the components involved in taking/restoring a backup.
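The backup-storage hunks replace the embedded concurrency.AllErrorRecorder (and the hand-written RecordError/HasErrors/Error forwarders) with errors.PerFileErrorRecorder, changing RecordError to take the failing filename. A sketch of behavior consistent with the new call sites — an assumption-laden illustration, not the real vitess.io/vitess/go/vt/mysqlctl/errors implementation:

```go
package errors

import (
	goerrors "errors"
	"sync"
)

// perFileErrorRecorder keys recorded errors by filename, matching the new
// bh.RecordError(filename, err) call in azblob.go above.
type perFileErrorRecorder struct {
	mu   sync.Mutex
	errs map[string][]error
}

func (r *perFileErrorRecorder) RecordError(filename string, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.errs == nil {
		r.errs = make(map[string][]error)
	}
	r.errs[filename] = append(r.errs[filename], err)
}

// Error flattens everything recorded so far; this is what lets
// getBackupManifestInto end with `return backup.Error()` and surface
// per-file read failures even when the MANIFEST itself decoded cleanly.
func (r *perFileErrorRecorder) Error() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	var all []error
	for _, errs := range r.errs {
		all = append(all, errs...)
	}
	return goerrors.Join(all...) // nil when nothing was recorded
}
```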
+ errors.BackupErrorRecorder } // BackupStorage is the interface to the storage system diff --git a/go/vt/mysqlctl/backup_blackbox_race_test.go b/go/vt/mysqlctl/blackbox/backup_race_test.go similarity index 97% rename from go/vt/mysqlctl/backup_blackbox_race_test.go rename to go/vt/mysqlctl/blackbox/backup_race_test.go index 5414ebc5fa6..fd39dfe4b06 100644 --- a/go/vt/mysqlctl/backup_blackbox_race_test.go +++ b/go/vt/mysqlctl/blackbox/backup_race_test.go @@ -16,8 +16,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +// Package blackbox is the blackbox tests for package mysqlctl. +package blackbox import ( "fmt" @@ -75,7 +75,7 @@ func TestExecuteBackupWithFailureOnLastFile(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -144,7 +144,7 @@ func TestExecuteBackupWithFailureOnLastFile(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.ErrorContains(t, err, "cannot add file: 3") diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/blackbox/backup_test.go similarity index 57% rename from go/vt/mysqlctl/backup_blackbox_test.go rename to go/vt/mysqlctl/blackbox/backup_test.go index 15244fb8782..b7e35304904 100644 --- a/go/vt/mysqlctl/backup_blackbox_test.go +++ b/go/vt/mysqlctl/blackbox/backup_test.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +// Package blackbox is the blackbox tests for package mysqlctl. 
+package blackbox import ( + "bytes" "context" + "errors" "fmt" + "io" "os" "path" "strings" @@ -31,7 +34,6 @@ import ( "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -48,40 +50,6 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" ) -const mysqlShutdownTimeout = 1 * time.Minute - -func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { - old := mysqlctl.BuiltinBackupMysqldTimeout - mysqlctl.BuiltinBackupMysqldTimeout = t - - return old -} - -func createBackupDir(root string, dirs ...string) error { - for _, dir := range dirs { - if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { - return err - } - } - - return nil -} - -func createBackupFiles(root string, fileCount int, ext string) error { - for i := 0; i < fileCount; i++ { - f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) - if err != nil { - return err - } - if _, err := f.Write([]byte("hello, world!")); err != nil { - return err - } - defer f.Close() - } - - return nil -} - func TestExecuteBackup(t *testing.T) { ctx := utils.LeakCheckContext(t) @@ -97,7 +65,7 @@ func TestExecuteBackup(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -133,8 +101,8 @@ func TestExecuteBackup(t *testing.T) { be := &mysqlctl.BuiltinBackupEngine{} // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) + oldDeadline := SetBuiltinBackupMysqldDeadline(time.Second) + defer SetBuiltinBackupMysqldDeadline(oldDeadline) bh := filebackupstorage.NewBackupHandle(nil, "", "", false) @@ -163,7 +131,7 @@ func TestExecuteBackup(t *testing.T) { Keyspace: keyspace, Shard: shard, Stats: fakeStats, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -221,7 +189,7 @@ func TestExecuteBackup(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) assert.Error(t, err) @@ -243,7 +211,7 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -279,8 +247,8 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { be := &mysqlctl.BuiltinBackupEngine{} // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) + oldDeadline := SetBuiltinBackupMysqldDeadline(time.Second) + defer SetBuiltinBackupMysqldDeadline(oldDeadline) bh := filebackupstorage.NewBackupHandle(nil, "", "", false) @@ -310,7 +278,7 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { Shard: shard, Stats: backupstats.NewFakeStats(), UpgradeSafe: true, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -336,7 +304,7 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { 
require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -397,12 +365,12 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.Error(t, err) // all four files will fail - require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled") + require.ErrorContains(t, err, "context canceled; context canceled; context canceled; context canceled") assert.Equal(t, mysqlctl.BackupUnusable, backupResult) } @@ -425,7 +393,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -482,7 +450,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -521,7 +489,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { RestoreToTimestamp: time.Time{}, DryRun: false, Stats: fakeStats, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, } // Successful restore. @@ -587,24 +555,374 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { } } -// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. -// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the -// (/. by default) called "#innodb_redo". 
See: -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity -func needInnoDBRedoLogSubdir() (needIt bool, err error) { - mysqldVersionStr, err := mysqlctl.GetVersionString() - if err != nil { - return needIt, err +type rwCloseFailFirstCall struct { + *bytes.Buffer + firstDone bool +} + +func (w *rwCloseFailFirstCall) Write(p []byte) (n int, err error) { + if w.firstDone { + return w.Buffer.Write(p) } - _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) - if err != nil { - return needIt, err + w.firstDone = true + return 0, errors.New("failing first write") +} + +func (w *rwCloseFailFirstCall) Read(p []byte) (n int, err error) { + if w.firstDone { + return w.Buffer.Read(p) } - versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - capableOf := mysql.ServerVersionCapableOf(versionStr) - if capableOf == nil { - return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + w.firstDone = true + return 0, errors.New("failing first read") +} + +func (w *rwCloseFailFirstCall) Close() error { + return nil +} + +func newWriteCloseFailFirstWrite(firstWriteDone bool) *rwCloseFailFirstCall { + return &rwCloseFailFirstCall{ + Buffer: bytes.NewBuffer(nil), + firstDone: firstWriteDone, } - return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) +} + +func TestExecuteBackupFailToWriteEachFileOnlyOnce(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + be := &mysqlctl.BuiltinBackupEngine{} + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // This mimics what happens with the other BackupHandles where doing AddFile will either truncate or override + // any existing data if the same filename already exists. + _, isRetry := bufferPerFiles[filename] + newBuffer := newWriteCloseFailFirstWrite(isRetry) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. 
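An aside on the fault-injection plumbing above: `AddFileReturnF` lets a test pick, per filename, the `io.WriteCloser` the engine will write into, and the fail-first double guarantees exactly one failure per file, so every file exercises the retry path exactly once. A standalone sketch of that contract (hypothetical `failFirstWriter`, trimmed from `rwCloseFailFirstCall` above):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// failFirstWriter mirrors the diff's rwCloseFailFirstCall: the first Write
// returns an error, every later Write goes through to the buffer.
type failFirstWriter struct {
	buf       bytes.Buffer
	firstDone bool
}

func (w *failFirstWriter) Write(p []byte) (int, error) {
	if !w.firstDone {
		w.firstDone = true
		return 0, errors.New("failing first write")
	}
	return w.buf.Write(p)
}

func main() {
	w := &failFirstWriter{}
	_, err := w.Write([]byte("attempt 1"))
	fmt.Println(err) // failing first write
	n, err := w.Write([]byte("attempt 2"))
	fmt.Println(n, err) // 9 <nil>: the retry succeeds
}
```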
+ fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + logger := logutil.NewMemoryLogger() + ctx, cancel := context.WithCancel(ctx) + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logger, + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + cancel() + + expectedLogs := []string{ + "Backing up file: test1/0.ibd (attempt 1/2)", + "Backing up file: test1/1.ibd (attempt 1/2)", + "Backing up file: test2/0.ibd (attempt 1/2)", + "Backing up file: test2/1.ibd (attempt 1/2)", + + "Backing up file: test1/0.ibd (attempt 2/2)", + "Backing up file: test1/1.ibd (attempt 2/2)", + "Backing up file: test2/0.ibd (attempt 2/2)", + "Backing up file: test2/1.ibd (attempt 2/2)", + + "Backing up file MANIFEST (attempt 1/2)", + "Failed backing up MANIFEST (attempt 1/2)", + "Backing up file MANIFEST (attempt 2/2)", + "Completed backing up MANIFEST (attempt 2/2)", + } + + // Sleep just long enough for everything to complete. + // It's not flaky, the race detector detects a race when there isn't, + // the machine is just too slow to propagate the ctxCancel() to all goroutines. + time.Sleep(2 * time.Second) + AssertLogs(t, expectedLogs, logger) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) +} + +func TestExecuteBackupFailToWriteFileTwice(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 1, 1) + + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + be := &mysqlctl.BuiltinBackupEngine{} + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + newBuffer := newWriteCloseFailFirstWrite(false) + bufferPerFiles[filename] = newBuffer + + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + logger := logutil.NewMemoryLogger() + fakeStats := backupstats.NewFakeStats() + ctx, cancel := context.WithCancel(ctx) + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logger, + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: fakeStats, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + cancel() + + // Sleep just long enough for everything to complete. + // It's not flaky, the race detector detects a race when there isn't, + // the machine is just too slow to propagate the ctxCancel() to all goroutines. 
+ time.Sleep(2 * time.Second) + + expectedLogs := []string{ + "Backing up file: test1/0.ibd (attempt 1/2)", + "Backing up file: test1/0.ibd (attempt 2/2)", + } + AssertLogs(t, expectedLogs, logger) + + ss := GetStats(fakeStats) + require.Equal(t, 2, ss.DestinationCloseStats) + require.Equal(t, 2, ss.DestinationOpenStats) + require.Equal(t, 2, ss.DestinationWriteStats) + require.Equal(t, 2, ss.SourceCloseStats) + require.Equal(t, 2, ss.SourceOpenStats) + require.Equal(t, 2, ss.SourceReadStats) + require.ErrorContains(t, err, "failing first write") + require.Equal(t, mysqlctl.BackupUnusable, backupResult) +} + +func TestExecuteRestoreFailToReadEachFileOnlyOnce(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + be := &mysqlctl.BuiltinBackupEngine{} + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // let's never make it fail for now + newBuffer := newWriteCloseFailFirstWrite(true) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) + + // let's mark each file in the buffer as if it is their first read + for key := range bufferPerFiles { + bufferPerFiles[key].firstDone = false + } + + // Now try to restore the above backup. 
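Before the restore test below, a quick sanity check on the stat counts it asserts (8/8/4/8/8/8). Assuming I read `GetStats` correctly, they follow from four data files (`SetupCluster(ctx, t, 2, 2)`), each failing its first read and succeeding on the retry; only the successful attempts move bytes:

```go
package main

import "fmt"

// Back-of-the-envelope check for TestExecuteRestoreFailToReadEachFileOnlyOnce.
func main() {
	files := 2 * 2 // SetupCluster(ctx, t, 2, 2): 2 dirs with 2 .ibd files each
	attempts := 2  // first read fails, the retry succeeds

	fmt.Println("Source:Open/Read/Close scopes:", files*attempts)  // 8
	fmt.Println("Destination:Open/Close scopes:", files*attempts)  // 8
	fmt.Println("Destination:Write scopes with bytes:", files)     // 4: failed reads write nothing
}
```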
+ fakeBh := &mysqlctl.FakeBackupHandle{} + fakeBh.ReadFileReturnF = func(ctx context.Context, filename string) (io.ReadCloser, error) { + return bufferPerFiles[filename], nil + } + + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + fakeStats := backupstats.NewFakeStats() + logger := logutil.NewMemoryLogger() + + restoreParams := mysqlctl.RestoreParams{ + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + BinLogPath: path.Join(backupRoot, "binlog"), + RelayLogPath: path.Join(backupRoot, "relaylog"), + RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), + RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + }, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + MysqlShutdownTimeout: MysqlShutdownTimeout, + } + + // Successful restore. + bm, err := be.ExecuteRestore(ctx, restoreParams, fakeBh) + assert.NoError(t, err) + assert.NotNil(t, bm) + + ss := GetStats(fakeStats) + require.Equal(t, 8, ss.DestinationCloseStats) + require.Equal(t, 8, ss.DestinationOpenStats) + require.Equal(t, 4, ss.DestinationWriteStats) + require.Equal(t, 8, ss.SourceCloseStats) + require.Equal(t, 8, ss.SourceOpenStats) + require.Equal(t, 8, ss.SourceReadStats) +} + +func TestExecuteRestoreFailToReadEachFileTwice(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + be := &mysqlctl.BuiltinBackupEngine{} + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // let's never make it fail for now + newBuffer := newWriteCloseFailFirstWrite(true) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) + + // Now try to restore the above backup. 
+	fakeBh := &mysqlctl.FakeBackupHandle{}
+	fakeBh.ReadFileReturnF = func(ctx context.Context, filename string) (io.ReadCloser, error) {
+		// Always make it fail, except when it is the MANIFEST file, otherwise we won't start restoring the other files.
+		buffer := bufferPerFiles[filename]
+		if filename != "MANIFEST" {
+			buffer.firstDone = false
+		}
+		return buffer, nil
+	}
+
+	fakedb = fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"}
+
+	fakeStats := backupstats.NewFakeStats()
+	logger := logutil.NewMemoryLogger()
+
+	restoreParams := mysqlctl.RestoreParams{
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+			BinLogPath:            path.Join(backupRoot, "binlog"),
+			RelayLogPath:          path.Join(backupRoot, "relaylog"),
+			RelayLogIndexPath:     path.Join(backupRoot, "relaylogindex"),
+			RelayLogInfoPath:      path.Join(backupRoot, "relayloginfo"),
+		},
+		Logger:               logger,
+		Mysqld:               mysqld,
+		Concurrency:          1,
+		HookExtraEnv:         map[string]string{},
+		DeleteBeforeRestore:  false,
+		DbName:               "test",
+		Keyspace:             "test",
+		Shard:                "-",
+		StartTime:            time.Now(),
+		RestoreToPos:         replication.Position{},
+		RestoreToTimestamp:   time.Time{},
+		DryRun:               false,
+		Stats:                fakeStats,
+		MysqlShutdownTimeout: MysqlShutdownTimeout,
+	}
+
+	// The restore must fail: every data file fails both of its read attempts.
+	bm, err := be.ExecuteRestore(ctx, restoreParams, fakeBh)
+	assert.ErrorContains(t, err, "failing first read")
+	assert.Nil(t, bm)
+
+	ss := GetStats(fakeStats)
+	require.Equal(t, 5, ss.DestinationCloseStats)
+	require.Equal(t, 5, ss.DestinationOpenStats)
+	require.Equal(t, 0, ss.DestinationWriteStats)
+	require.Equal(t, 5, ss.SourceCloseStats)
+	require.Equal(t, 5, ss.SourceOpenStats)
+	require.Equal(t, 5, ss.SourceReadStats)
 }
diff --git a/go/vt/mysqlctl/blackbox/utils.go b/go/vt/mysqlctl/blackbox/utils.go
new file mode 100644
index 00000000000..e4e3f11fb3c
--- /dev/null
+++ b/go/vt/mysqlctl/blackbox/utils.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package blackbox + +import ( + "context" + "fmt" + "os" + "path" + "slices" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +type StatSummary struct { + DestinationCloseStats int + DestinationOpenStats int + DestinationWriteStats int + SourceCloseStats int + SourceOpenStats int + SourceReadStats int +} + +func GetStats(stats *backupstats.FakeStats) StatSummary { + var ss StatSummary + + for _, sr := range stats.ScopeReturns { + switch sr.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + ss.DestinationCloseStats += len(sr.TimedIncrementCalls) + case "Destination:Open": + ss.DestinationOpenStats += len(sr.TimedIncrementCalls) + case "Destination:Write": + if len(sr.TimedIncrementBytesCalls) > 0 { + ss.DestinationWriteStats++ + } + case "Source:Close": + ss.SourceCloseStats += len(sr.TimedIncrementCalls) + case "Source:Open": + ss.SourceOpenStats += len(sr.TimedIncrementCalls) + case "Source:Read": + if len(sr.TimedIncrementBytesCalls) > 0 { + ss.SourceReadStats++ + } + } + } + return ss +} + +func AssertLogs(t *testing.T, expectedLogs []string, logger *logutil.MemoryLogger) { + for _, log := range expectedLogs { + require.Truef(t, slices.ContainsFunc(logger.Events, func(event *logutilpb.Event) bool { + return event.GetValue() == log + }), "%s is missing from the logs", log) + } +} + +func SetupCluster(ctx context.Context, t *testing.T, dirs, filesPerDir int) (backupRoot string, keyspace string, shard string, ts *topo.Server) { + // Set up local backup directory + id := fmt.Sprintf("%d", time.Now().UnixNano()) + backupRoot = fmt.Sprintf("testdata/builtinbackup_test_%s", id) + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to execute semaphore acquire inside + // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). 
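`AssertLogs` above checks containment via `slices.ContainsFunc` over `logger.Events`, not ordering, which is why the interleaved per-goroutine attempt logs in the retry tests stay safe to assert. A minimal usage sketch (hypothetical test and messages; assumes `testing` and `logutil` in scope):

```go
func TestAssertLogsSketch(t *testing.T) {
	logger := logutil.NewMemoryLogger()
	logger.Infof("Backing up file: test1/0.ibd (attempt 1/2)")
	logger.Infof("some unrelated line in between")
	logger.Infof("Backing up file: test1/0.ibd (attempt 2/2)")

	// Passes as long as every expected message appears somewhere in
	// logger.Events; extra messages and ordering are ignored.
	AssertLogs(t, []string{
		"Backing up file: test1/0.ibd (attempt 1/2)",
		"Backing up file: test1/0.ibd (attempt 2/2)",
	}, logger)
}
```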
+ for dirI := range dirs { + dirName := "test" + strconv.Itoa(dirI+1) + require.NoError(t, createBackupDir(dataDir, dirName)) + require.NoError(t, createBackupFiles(path.Join(dataDir, dirName), filesPerDir, "ibd")) + } + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(backupRoot)) + }) + + needIt, err := NeedInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard = "mykeyspace", "-" + ts = memorytopo.NewServer(ctx, "cell1") + t.Cleanup(func() { + ts.Close() + }) + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + return backupRoot, keyspace, shard, ts +} + +// NeedInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. +// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the +// (/. by default) called "#innodb_redo". See: +// +// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity +func NeedInnoDBRedoLogSubdir() (needIt bool, err error) { + mysqldVersionStr, err := mysqlctl.GetVersionString() + if err != nil { + return needIt, err + } + _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) + if err != nil { + return needIt, err + } + versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) + capableOf := mysql.ServerVersionCapableOf(versionStr) + if capableOf == nil { + return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + } + return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) +} + +const MysqlShutdownTimeout = 1 * time.Minute + +func SetBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { + old := mysqlctl.BuiltinBackupMysqldTimeout + mysqlctl.BuiltinBackupMysqldTimeout = t + + return old +} + +func createBackupDir(root string, dirs ...string) error { + for _, dir := range dirs { + if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { + return err + } + } + + return nil +} + +func createBackupFiles(root string, fileCount int, ext string) error { + for i := 0; i < fileCount; i++ { + f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) + if err != nil { + return err + } + if _, err := f.Write([]byte("hello, world!")); err != nil { + return err + } + defer f.Close() + } + + return nil +} diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index 5aa759f6f7a..2046a238400 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -29,18 +29,17 @@ import ( "os" "path" "path/filepath" - "sync" + "strconv" "sync/atomic" "time" "github.com/spf13/pflag" - "golang.org/x/sync/semaphore" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/ioutil" "vitess.io/vitess/go/mysql" 
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" @@ -60,6 +59,8 @@ const ( builtinBackupEngineName = "builtin" AutoIncrementalFromPos = "auto" dataDictionaryFile = "mysql.ibd" + + maxRetriesPerFile = 1 ) var ( @@ -149,6 +150,13 @@ type FileEntry struct { // ParentPath is an optional prefix to the Base path. If empty, it is ignored. Useful // for writing files in a temporary directory ParentPath string + + // RetryCount specifies how many times we retried restoring/backing up this FileEntry. + // If we fail to restore/backup this FileEntry, we will retry up to maxRetriesPerFile times. + // Every time the builtin backup engine retries this file, we increment this field by 1. + // We don't care about adding this information to the MANIFEST and also to not cause any compatibility issue + // we are adding the - json tag to let Go know it can ignore the field. + RetryCount int `json:"-"` } func init() { @@ -585,6 +593,11 @@ func (be *BuiltinBackupEngine) backupFiles( mysqlVersion string, incrDetails *IncrementalBackupDetails, ) (finalErr error) { + // backupFiles always wait for AddFiles to finish its work before returning, unless there has been a + // non-recoverable error in the process, in both cases we can cancel the context safely. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Get the files to backup. // We don't care about totalSize because we add each file separately. var fes []FileEntry @@ -599,43 +612,82 @@ func (be *BuiltinBackupEngine) backupFiles( } params.Logger.Infof("found %v files to backup", len(fes)) - // Backup with the provided concurrency. - sema := semaphore.NewWeighted(int64(params.Concurrency)) - wg := sync.WaitGroup{} + // The error here can be ignored safely. Failed FileEntry's are handled in the next 'if' statement. + _ = be.backupFileEntries(ctx, fes, bh, params) + // BackupHandle supports the BackupErrorRecorder interface for tracking errors + // across any goroutines that fan out to take the backup. This means that we + // don't need a local error recorder and can put everything through the bh. + // + // This handles the scenario where bh.AddFile() encounters an error asynchronously, + // which ordinarily would be lost in the context of `be.backupFile`, i.e. if an + // error were encountered + // [here](https://github.com/vitessio/vitess/blob/d26b6c7975b12a87364e471e2e2dfa4e253c2a5b/go/vt/mysqlctl/s3backupstorage/s3.go#L139-L142). + // + // All the errors are grouped per file, if one or more files failed, we back them up + // once more concurrently, if any of the retry fail, we fail-fast by canceling the context + // and return an error. There is no reason to continue processing the other retries, if + // one of them failed. + if files := bh.GetFailedFiles(); len(files) > 0 { + newFEs := make([]FileEntry, len(fes)) + for _, file := range files { + fileNb, err := strconv.Atoi(file) + if err != nil { + return vterrors.Wrapf(err, "failed to retry file '%s'", file) + } + oldFes := fes[fileNb] + newFEs[fileNb] = FileEntry{ + Base: oldFes.Base, + Name: oldFes.Name, + ParentPath: oldFes.ParentPath, + RetryCount: 1, + } + bh.ResetErrorForFile(file) + } + err = be.backupFileEntries(ctx, newFEs, bh, params) + if err != nil { + return err + } + } + + // Backup the MANIFEST file and apply retry logic. 
+	var manifestErr error
+	for currentRetry := 0; currentRetry <= maxRetriesPerFile; currentRetry++ {
+		manifestErr = be.backupManifest(ctx, params, bh, backupPosition, purgedPosition, fromPosition, fromBackupName, serverUUID, mysqlVersion, incrDetails, fes, currentRetry)
+		if manifestErr == nil {
+			break
+		}
+		bh.ResetErrorForFile(backupManifestFileName)
+	}
+	if manifestErr != nil {
+		return manifestErr
+	}
+	return nil
+}
+
+// backupFileEntries iterates over a slice of FileEntry, backing them up concurrently up to the defined concurrency limit.
+// This function ignores empty FileEntry values, which allows the retry mechanism to send a partially empty slice
+// without messing up the indexes of the retriable FileEntry values.
+// This function does not leave any background operation behind itself: all calls to bh.AddFile will be finished or canceled.
+func (be *BuiltinBackupEngine) backupFileEntries(ctx context.Context, fes []FileEntry, bh backupstorage.BackupHandle, params BackupParams) error {
 	ctxCancel, cancel := context.WithCancel(ctx)
 	defer func() {
-		// We may still have operations in flight that require a valid context, such as adding files to S3.
-		// Unless we encountered an error, we should not cancel the context, this is taken care of later
-		// in the process. If we encountered an error however, we can safely cancel the context as we should
-		// no longer work on anything and exit fast.
-		if finalErr != nil {
-			cancel()
-		}
+		// If we reached this defer, we can cancel the context in all cases.
+		// The only ways to get here are: a panic, an error when ending the backup, or a successful backup.
+		// For all three, it is safe to cancel the context: there should be no pending operations
+		// that 1) haven't completed, and 2) we still care about.
+		cancel()
 	}()
 
+	g := errgroup.Group{}
+	g.SetLimit(params.Concurrency)
 	for i := range fes {
-		wg.Add(1)
-		go func(i int) {
-			defer wg.Done()
+		if fes[i].Name == "" {
+			continue
+		}
+		g.Go(func() error {
 			fe := &fes[i]
-			// Wait until we are ready to go, return if we encounter an error
-			acqErr := sema.Acquire(ctxCancel, 1)
-			if acqErr != nil {
-				log.Errorf("Unable to acquire semaphore needed to backup file: %s, err: %s", fe.Name, acqErr.Error())
-				bh.RecordError(acqErr)
-				cancel()
-				return
-			}
-			defer sema.Release(1)
-
-			// First check if we have any error, if we have, there is no point trying backing up this file.
-			// We check for errors before checking if the context is canceled on purpose, if there was an
-			// error, the context would have been canceled already.
-			if bh.HasErrors() {
-				params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error())
-				return
-			}
+			name := fmt.Sprintf("%v", i)
 
 			// Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might
 			// end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66,
@@ -644,83 +696,30 @@ func (be *BuiltinBackupEngine) backupFiles(
 			select {
 			case <-ctxCancel.Done():
 				log.Errorf("Context canceled or timed out during %q backup", fe.Name)
-				bh.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled"))
-				return
+				bh.RecordError(name, vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled"))
+				return nil
 			default:
 			}
 
 			// Backup the individual file.
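The semaphore-plus-WaitGroup fan-out above collapses into a single `errgroup` with a limit: `g.Go` blocks once `SetLimit` workers are in flight, and `g.Wait` replaces `wg.Wait`. A minimal sketch of the pattern, with the per-file work stubbed out:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g := errgroup.Group{}
	g.SetLimit(2) // like params.Concurrency: at most 2 files in flight

	for i := 0; i < 4; i++ {
		i := i // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			fmt.Println("backing up file", i) // stand-in for be.backupFile(...)
			return nil                        // errors go to the handle, not the group
		})
	}
	_ = g.Wait() // as in the engine: ignore the group error, read bh.Error() afterwards
}
```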
- name := fmt.Sprintf("%v", i) - if err := be.backupFile(ctxCancel, params, bh, fe, name); err != nil { - bh.RecordError(err) - cancel() + var errBackupFile error + if errBackupFile = be.backupFile(ctxCancel, params, bh, fe, name); errBackupFile != nil { + bh.RecordError(name, vterrors.Wrapf(errBackupFile, "failed to backup file '%s'", name)) + if fe.RetryCount >= maxRetriesPerFile { + // this is the last attempt, and we have an error, we can cancel everything and fail fast. + cancel() + } } - }(i) + return nil + }) } + _ = g.Wait() - wg.Wait() - - // BackupHandle supports the ErrorRecorder interface for tracking errors - // across any goroutines that fan out to take the backup. This means that we - // don't need a local error recorder and can put everything through the bh. - // - // This handles the scenario where bh.AddFile() encounters an error asynchronously, - // which ordinarily would be lost in the context of `be.backupFile`, i.e. if an - // error were encountered - // [here](https://github.com/vitessio/vitess/blob/d26b6c7975b12a87364e471e2e2dfa4e253c2a5b/go/vt/mysqlctl/s3backupstorage/s3.go#L139-L142). - if bh.HasErrors() { - return bh.Error() - } - - // open the MANIFEST - wc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) + err := bh.EndBackup(ctx) if err != nil { - return vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) - } - defer func() { - closeErr := wc.Close() - if finalErr == nil { - finalErr = closeErr - } - }() - - // JSON-encode and write the MANIFEST - bm := &builtinBackupManifest{ - // Common base fields - BackupManifest: BackupManifest{ - BackupName: bh.Name(), - BackupMethod: builtinBackupEngineName, - Position: backupPosition, - PurgedPosition: purgedPosition, - FromPosition: fromPosition, - FromBackup: fromBackupName, - Incremental: !fromPosition.IsZero(), - ServerUUID: serverUUID, - TabletAlias: params.TabletAlias, - Keyspace: params.Keyspace, - Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), - MySQLVersion: mysqlVersion, - UpgradeSafe: params.UpgradeSafe, - IncrementalDetails: incrDetails, - }, - - // Builtin-specific fields - FileEntries: fes, - SkipCompress: !backupStorageCompress, - CompressionEngine: CompressionEngineName, - ExternalDecompressor: ManifestExternalDecompressorCmd, - } - data, err := json.MarshalIndent(bm, "", " ") - if err != nil { - return vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) - } - if _, err := wc.Write([]byte(data)); err != nil { - return vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) + return err } - - return nil + return bh.Error() } type backupPipe struct { @@ -733,6 +732,7 @@ type backupPipe struct { crc32 hash.Hash32 nn int64 done chan struct{} + failed chan struct{} closed int32 } @@ -743,6 +743,7 @@ func newBackupWriter(filename string, writerBufferSize int, maxSize int64, w io. 
filename: filename, maxSize: maxSize, done: make(chan struct{}), + failed: make(chan struct{}), } } @@ -752,10 +753,16 @@ func newBackupReader(filename string, maxSize int64, r io.Reader) *backupPipe { r: r, filename: filename, done: make(chan struct{}), + failed: make(chan struct{}), maxSize: maxSize, } } +func retryToString(retry int) string { + // We convert the retry number to an attempt number, increasing retry by one, so it looks more human friendly + return fmt.Sprintf("(attempt %d/%d)", retry+1, maxRetriesPerFile+1) +} + func (bp *backupPipe) Read(p []byte) (int, error) { nn, err := bp.r.Read(p) _, _ = bp.crc32.Write(p[:nn]) @@ -770,9 +777,17 @@ func (bp *backupPipe) Write(p []byte) (int, error) { return nn, err } -func (bp *backupPipe) Close() error { +func (bp *backupPipe) Close(isDone bool) (err error) { if atomic.CompareAndSwapInt32(&bp.closed, 0, 1) { - close(bp.done) + // If we fail to Flush the writer we must report this backup as a failure. + defer func() { + if isDone && err == nil { + close(bp.done) + return + } + close(bp.failed) + }() + if bp.w != nil { if err := bp.w.Flush(); err != nil { return err @@ -786,28 +801,31 @@ func (bp *backupPipe) HashString() string { return hex.EncodeToString(bp.crc32.Sum(nil)) } -func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, logger logutil.Logger, restore bool) { - messageStr := "restoring " +func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, logger logutil.Logger, restore bool, retryStr string) { + messageStr := "restoring" if !restore { - messageStr = "backing up " + messageStr = "backing up" } tick := time.NewTicker(period) defer tick.Stop() for { select { case <-ctx.Done(): - logger.Infof("Canceled %s of %q file", messageStr, bp.filename) + logger.Infof("Canceled %s of %q file %s", messageStr, bp.filename, retryStr) return case <-bp.done: - logger.Infof("Completed %s %q", messageStr, bp.filename) + logger.Infof("Completed %s %q %s", messageStr, bp.filename, retryStr) + return + case <-bp.failed: + logger.Infof("Failed %s %q %s", messageStr, bp.filename, retryStr) return case <-tick.C: written := float64(atomic.LoadInt64(&bp.nn)) if bp.maxSize == 0 { - logger.Infof("%s %q: %.02fkb", messageStr, bp.filename, written/1024.0) + logger.Infof("%s %q %s: %.02fkb", messageStr, bp.filename, retryStr, written/1024.0) } else { maxSize := float64(bp.maxSize) - logger.Infof("%s %q: %.02f%% (%.02f/%.02fkb)", messageStr, bp.filename, 100.0*written/maxSize, written/1024.0, maxSize/1024.0) + logger.Infof("%s %q %s: %.02f%% (%.02f/%.02fkb)", messageStr, bp.filename, retryStr, 100.0*written/maxSize, written/1024.0, maxSize/1024.0) } } } @@ -815,12 +833,15 @@ func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, // backupFile backs up an individual file. func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, fe *FileEntry, name string) (finalErr error) { - ctx, cancel := context.WithCancel(ctx) - defer func() { - if finalErr != nil { - cancel() - } - }() + // We need another context that does not live outside of this function. + // Reporting progress, compressing and writing are operations that will be + // over by the time we exit this function, they can use this cancelable context. + // However, AddFile is something that may continue in the background even after + // this function exits. In this case, we give it the parent context so the caller + // has more control over when to cancel AddFile. 
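The comment above is the heart of this change: local work must die with the function, while the storage upload may legitimately outlive it. A stripped-down sketch of that two-context shape (hypothetical `backupOneFile` and `reportProgress`; only `bh.AddFile` is from the real interface):

```go
package sketch

import (
	"context"

	"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
)

func backupOneFile(ctx context.Context, bh backupstorage.BackupHandle, name string, size int64) error {
	// Local work: progress reporting and compression end when this function returns.
	cancelableCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	go reportProgress(cancelableCtx, name)

	// Deliberately the parent ctx: an object-storage upload may keep draining in a
	// background goroutine after we return, so only the caller decides when to cancel it.
	dest, err := bh.AddFile(ctx, name, size)
	if err != nil {
		return err
	}
	defer dest.Close()
	// ... wire compressors to cancelableCtx and copy the source into dest ...
	return nil
}

// reportProgress is a stub standing in for backupPipe.ReportProgress.
func reportProgress(ctx context.Context, name string) { <-ctx.Done() }
```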
+ cancelableCtx, cancel := context.WithCancel(ctx) + defer cancel() + // Open the source file for reading. openSourceAt := time.Now() source, err := fe.open(params.Cnf, true) @@ -843,11 +864,12 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara return err } + retryStr := retryToString(fe.RetryCount) br := newBackupReader(fe.Name, fi.Size(), timedSource) - go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, false /*restore*/) + go br.ReportProgress(cancelableCtx, builtinBackupProgress, params.Logger, false /*restore*/, retryStr) // Open the destination file for writing, and a buffer. - params.Logger.Infof("Backing up file: %v", fe.Name) + params.Logger.Infof("Backing up file: %v %s", fe.Name, retryStr) openDestAt := time.Now() dest, err := bh.AddFile(ctx, name, fi.Size()) if err != nil { @@ -879,19 +901,20 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara defer func() { // Close the backupPipe to finish writing on destination. - if err := bw.Close(); err != nil { + if err := bw.Close(createAndCopyErr == nil); err != nil { createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrapf(err, "cannot flush destination: %v", name)) } - if err := br.Close(); err != nil { + if err := br.Close(createAndCopyErr == nil); err != nil { createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrap(err, "failed to close the source reader")) } + }() // Create the gzip compression pipe, if necessary. if backupStorageCompress { var compressor io.WriteCloser if ExternalCompressorCmd != "" { - compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) + compressor, err = newExternalCompressor(cancelableCtx, ExternalCompressorCmd, writer, params.Logger) } else { compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) } @@ -902,13 +925,13 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) - closer := ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout) + closer := ioutil.NewTimeoutCloser(cancelableCtx, compressor, closeTimeout) defer func() { // Close gzip to flush it, after that all data is sent to writer. 
closeCompressorAt := time.Now() - params.Logger.Infof("closing compressor") + params.Logger.Infof("Closing compressor for file: %s %s", fe.Name, retryStr) if cerr := closer.Close(); err != nil { - cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", name) + cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", fe.Name) params.Logger.Error(cerr) createAndCopyErr = errors.Join(createAndCopyErr, cerr) } @@ -938,6 +961,94 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara return nil } +func (be *BuiltinBackupEngine) backupManifest( + ctx context.Context, + params BackupParams, + bh backupstorage.BackupHandle, + backupPosition replication.Position, + purgedPosition replication.Position, + fromPosition replication.Position, + fromBackupName string, + serverUUID string, + mysqlVersion string, + incrDetails *IncrementalBackupDetails, + fes []FileEntry, + currentAttempt int, +) (finalErr error) { + retryStr := retryToString(currentAttempt) + params.Logger.Infof("Backing up file %s %s", backupManifestFileName, retryStr) + defer func() { + state := "Completed" + if finalErr != nil { + state = "Failed" + } + params.Logger.Infof("%s backing up %s %s", state, backupManifestFileName, retryStr) + }() + + // Creating this function allows us to ensure we always close the writer no matter what, + // and in case of success that we close it before calling bh.EndBackup. + addAndWrite := func() (addAndWriteError error) { + // open the MANIFEST + wc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) + if err != nil { + return vterrors.Wrapf(err, "cannot add %v to backup %s", backupManifestFileName, retryStr) + } + defer func() { + if err := wc.Close(); err != nil { + addAndWriteError = errors.Join(addAndWriteError, vterrors.Wrapf(err, "cannot close backup: %v", backupManifestFileName)) + } + }() + + // JSON-encode and write the MANIFEST + bm := &builtinBackupManifest{ + // Common base fields + BackupManifest: BackupManifest{ + BackupName: bh.Name(), + BackupMethod: builtinBackupEngineName, + Position: backupPosition, + PurgedPosition: purgedPosition, + FromPosition: fromPosition, + FromBackup: fromBackupName, + Incremental: !fromPosition.IsZero(), + ServerUUID: serverUUID, + TabletAlias: params.TabletAlias, + Keyspace: params.Keyspace, + Shard: params.Shard, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), + FinishedTime: time.Now().UTC().Format(time.RFC3339), + MySQLVersion: mysqlVersion, + UpgradeSafe: params.UpgradeSafe, + IncrementalDetails: incrDetails, + }, + + // Builtin-specific fields + FileEntries: fes, + SkipCompress: !backupStorageCompress, + CompressionEngine: CompressionEngineName, + ExternalDecompressor: ManifestExternalDecompressorCmd, + } + data, err := json.MarshalIndent(bm, "", " ") + if err != nil { + return vterrors.Wrapf(err, "cannot JSON encode %v %s", backupManifestFileName, retryStr) + } + if _, err := wc.Write(data); err != nil { + return vterrors.Wrapf(err, "cannot write %v %s", backupManifestFileName, retryStr) + } + return nil + } + + err := addAndWrite() + if err != nil { + return err + } + + err = bh.EndBackup(ctx) + if err != nil { + return err + } + return bh.Error() +} + // executeRestoreFullBackup restores the files from a full backup. The underlying mysql database service is expected to be stopped. 
func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { @@ -997,8 +1108,8 @@ func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Conte // we return the position from which replication should start // otherwise an error is returned func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) { - var bm builtinBackupManifest - if err := getBackupManifestInto(ctx, bh, &bm); err != nil { + bm, err := be.restoreManifest(ctx, params, bh) + if err != nil { return nil, err } @@ -1007,7 +1118,6 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor return nil, err } - var err error if bm.Incremental { err = be.executeRestoreIncrementalBackup(ctx, params, bh, bm) } else { @@ -1020,6 +1130,26 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor return &bm.BackupManifest, nil } +func (be *BuiltinBackupEngine) restoreManifest(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (bm builtinBackupManifest, finalErr error) { + var retryCount int + defer func() { + state := "Completed" + if finalErr != nil { + state = "Failed" + } + params.Logger.Infof("%s restoring %s %s", state, backupManifestFileName, retryToString(retryCount)) + }() + + for ; retryCount <= maxRetriesPerFile; retryCount++ { + params.Logger.Infof("Restoring file %s %s", backupManifestFileName, retryToString(retryCount)) + if finalErr = getBackupManifestInto(ctx, bh, &bm); finalErr == nil { + break + } + params.Logger.Infof("Failed restoring %s %s", backupManifestFileName, retryToString(retryCount)) + } + return +} + // restoreFiles will copy all the files from the BackupStorage to the // right place. func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) (createdDir string, err error) { @@ -1040,75 +1170,79 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP } } fes := bm.FileEntries - sema := semaphore.NewWeighted(int64(params.Concurrency)) - rec := concurrency.AllErrorRecorder{} - wg := sync.WaitGroup{} - - ctxCancel, cancel := context.WithCancel(ctx) - defer func() { - // We may still have operations in flight that require a valid context, such as adding files to S3. - // Unless we encountered an error, we should not cancel the context. This is taken care of later - // in the process. If we encountered an error however, we can safely cancel the context as we should - // no longer work on anything and exit fast. 
+ _ = be.restoreFileEntries(ctx, fes, bh, bm, params, createdDir) + if files := bh.GetFailedFiles(); len(files) > 0 { + newFEs := make([]FileEntry, len(fes)) + for _, file := range files { + fileNb, err := strconv.Atoi(file) + if err != nil { + return "", vterrors.Wrapf(err, "failed to retry file '%s'", file) + } + oldFes := fes[fileNb] + newFEs[fileNb] = FileEntry{ + Base: oldFes.Base, + Name: oldFes.Name, + ParentPath: oldFes.ParentPath, + Hash: oldFes.Hash, + RetryCount: 1, + } + bh.ResetErrorForFile(file) + } + err = be.restoreFileEntries(ctx, newFEs, bh, bm, params, createdDir) if err != nil { - cancel() + return "", err } - }() + } + return createdDir, nil +} + +func (be *BuiltinBackupEngine) restoreFileEntries(ctx context.Context, fes []FileEntry, bh backupstorage.BackupHandle, bm builtinBackupManifest, params RestoreParams, createdDir string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(params.Concurrency) for i := range fes { - wg.Add(1) - go func(i int) { - defer wg.Done() + if fes[i].Name == "" { + continue + } + g.Go(func() error { fe := &fes[i] - // Wait until we are ready to go, return if we encounter an error - acqErr := sema.Acquire(ctxCancel, 1) - if acqErr != nil { - log.Errorf("Unable to acquire semaphore needed to restore file: %s, err: %s", fe.Name, acqErr.Error()) - rec.RecordError(acqErr) - cancel() - return - } - defer sema.Release(1) - - // First check if we have any error, if we have, there is no point trying to restore this file. - // We check for errors before checking if the context is canceled on purpose, if there was an - // error, the context would have been canceled already. - if rec.HasErrors() { - params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error()) - return - } - + name := fmt.Sprintf("%v", i) // Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might // end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66, // which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. select { - case <-ctxCancel.Done(): + case <-ctx.Done(): log.Errorf("Context canceled or timed out during %q restore", fe.Name) - rec.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) - return + bh.RecordError(name, vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) + return nil default: } fe.ParentPath = createdDir + // And restore the file. 
- name := fmt.Sprintf("%v", i) - params.Logger.Infof("Copying file %v: %v", name, fe.Name) - err := be.restoreFile(ctxCancel, params, bh, fe, bm, name) - if err != nil { - rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fe.Name)) - cancel() + params.Logger.Infof("Copying file %v: %v %s", name, fe.Name, retryToString(fe.RetryCount)) + if errRestore := be.restoreFile(ctx, params, bh, fe, bm, name); errRestore != nil { + bh.RecordError(name, vterrors.Wrapf(errRestore, "failed to restore file %v to %v", name, fe.Name)) + if fe.RetryCount >= maxRetriesPerFile { + // this is the last attempt, and we have an error, we can return an error, which will let errgroup + // know it can cancel the context + return errRestore + } } - }(i) + return nil + }) } - wg.Wait() - return createdDir, rec.Error() + _ = g.Wait() + return bh.Error() } // restoreFile restores an individual file. func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, fe *FileEntry, bm builtinBackupManifest, name string) (finalErr error) { ctx, cancel := context.WithCancel(ctx) defer cancel() + // Open the source file for reading. openSourceAt := time.Now() source, err := bh.ReadFile(ctx, name) @@ -1126,8 +1260,15 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa params.Stats.Scope(stats.Operation("Source:Close")).TimedIncrement(time.Since(closeSourceAt)) }() - br := newBackupReader(name, 0, timedSource) - go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, true) + // Create the backup/source reader and start reporting progress + retryStr := retryToString(fe.RetryCount) + br := newBackupReader(fe.Name, 0, timedSource) + go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, true, retryStr) + defer func() { + if err := br.Close(finalErr == nil); err != nil { + finalErr = vterrors.Wrap(finalErr, "failed to close the source reader") + } + }() var reader io.Reader = br // Open the destination file for writing. @@ -1213,10 +1354,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa return vterrors.Wrap(err, "failed to flush destination buffer") } - if err := br.Close(); err != nil { - return vterrors.Wrap(err, "failed to close the source reader") - } - return nil } diff --git a/go/vt/mysqlctl/cephbackupstorage/ceph.go b/go/vt/mysqlctl/cephbackupstorage/ceph.go index f8e33dbe641..62330b869f0 100644 --- a/go/vt/mysqlctl/cephbackupstorage/ceph.go +++ b/go/vt/mysqlctl/cephbackupstorage/ceph.go @@ -32,9 +32,10 @@ import ( minio "github.com/minio/minio-go" "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + + "vitess.io/vitess/go/vt/log" + errorsbackup "vitess.io/vitess/go/vt/mysqlctl/errors" "vitess.io/vitess/go/vt/servenv" ) @@ -69,23 +70,8 @@ type CephBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder waitGroup sync.WaitGroup -} - -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) Error() error { - return bh.errors.Error() + errorsbackup.PerFileErrorRecorder } // Directory implements BackupHandle. 
@@ -109,7 +95,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi
 		defer bh.waitGroup.Done()
 
 		// ceph bucket name is where the backups will go
-		//backup handle dir field contains keyspace/shard value
+		// backup handle dir field contains keyspace/shard value
 		bucket := alterBucketName(bh.dir)
 
 		// Give PutObject() the read end of the pipe.
@@ -120,7 +106,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi
 			// Signal the writer that an error occurred, in case it's not done writing yet.
 			reader.CloseWithError(err)
 			// In case the error happened after the writer finished, we need to remember it.
-			bh.RecordError(err)
+			bh.RecordError(filename, err)
 		}
 	}()
 	// Give our caller the write end of the pipe.
diff --git a/go/vt/mysqlctl/errors/errors.go b/go/vt/mysqlctl/errors/errors.go
new file mode 100644
index 00000000000..02485901e50
--- /dev/null
+++ b/go/vt/mysqlctl/errors/errors.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+type BackupErrorRecorder interface {
+	RecordError(string, error)
+	HasErrors() bool
+	Error() error
+	GetFailedFiles() []string
+	ResetErrorForFile(string)
+}
+
+// PerFileErrorRecorder records errors and groups them by filename.
+// This is particularly useful when processing several files at the same time
+// and wanting to know which files failed.
+type PerFileErrorRecorder struct {
+	mu     sync.Mutex
+	errors map[string][]error
+}
+
+// RecordError records a possible error:
+//   - does nothing if err is nil
+func (pfer *PerFileErrorRecorder) RecordError(filename string, err error) {
+	if err == nil {
+		return
+	}
+
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+
+	if pfer.errors == nil {
+		pfer.errors = make(map[string][]error, 1)
+	}
+	pfer.errors[filename] = append(pfer.errors[filename], err)
+}
+
+// HasErrors returns true if we ever recorded an error.
+func (pfer *PerFileErrorRecorder) HasErrors() bool {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	return len(pfer.errors) > 0
+}
+
+// Error returns all the errors that were recorded.
+func (pfer *PerFileErrorRecorder) Error() error {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	if pfer.errors == nil {
+		return nil
+	}
+
+	var errs []string
+	for _, fileErrs := range pfer.errors {
+		for _, err := range fileErrs {
+			errs = append(errs, err.Error())
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return errors.New(strings.Join(errs, "; "))
+}
+
+// GetFailedFiles returns a slice of filenames, each of which has at least one recorded error.
+func (pfer *PerFileErrorRecorder) GetFailedFiles() []string {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	if pfer.errors == nil {
+		return nil
+	}
+	files := make([]string, 0, len(pfer.errors))
+	for filename := range pfer.errors {
+		files = append(files, filename)
+	}
+	return files
+}
+
+// ResetErrorForFile removes all the errors of a given file.
+func (pfer *PerFileErrorRecorder) ResetErrorForFile(filename string) { + pfer.mu.Lock() + defer pfer.mu.Unlock() + if pfer.errors == nil { + return + } + delete(pfer.errors, filename) +} diff --git a/go/vt/mysqlctl/fakebackupstorage.go b/go/vt/mysqlctl/fakebackupstorage.go index 75587191157..582b422cf58 100644 --- a/go/vt/mysqlctl/fakebackupstorage.go +++ b/go/vt/mysqlctl/fakebackupstorage.go @@ -21,20 +21,21 @@ import ( "fmt" "io" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + "vitess.io/vitess/go/vt/mysqlctl/errors" ) type FakeBackupHandle struct { Dir string NameV string ReadOnly bool - Errors concurrency.AllErrorRecorder + errors.PerFileErrorRecorder AbortBackupCalls []context.Context AbortBackupReturn error AddFileCalls []FakeBackupHandleAddFileCall AddFileReturn FakeBackupHandleAddFileReturn + AddFileReturnF func(filename string) FakeBackupHandleAddFileReturn EndBackupCalls []context.Context EndBackupReturn error ReadFileCalls []FakeBackupHandleReadFileCall @@ -57,18 +58,6 @@ type FakeBackupHandleReadFileCall struct { Filename string } -func (fbh *FakeBackupHandle) RecordError(err error) { - fbh.Errors.RecordError(err) -} - -func (fbh *FakeBackupHandle) HasErrors() bool { - return fbh.Errors.HasErrors() -} - -func (fbh *FakeBackupHandle) Error() error { - return fbh.Errors.Error() -} - func (fbh *FakeBackupHandle) Directory() string { return fbh.Dir } @@ -79,6 +68,11 @@ func (fbh *FakeBackupHandle) Name() string { func (fbh *FakeBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { fbh.AddFileCalls = append(fbh.AddFileCalls, FakeBackupHandleAddFileCall{ctx, filename, filesize}) + + if fbh.AddFileReturnF != nil { + r := fbh.AddFileReturnF(filename) + return r.WriteCloser, r.Err + } return fbh.AddFileReturn.WriteCloser, fbh.AddFileReturn.Err } diff --git a/go/vt/mysqlctl/filebackupstorage/file.go b/go/vt/mysqlctl/filebackupstorage/file.go index 99148d9169b..bd73c55e70c 100644 --- a/go/vt/mysqlctl/filebackupstorage/file.go +++ b/go/vt/mysqlctl/filebackupstorage/file.go @@ -27,8 +27,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/ioutil" - "vitess.io/vitess/go/vt/concurrency" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" @@ -59,7 +60,7 @@ type FileBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder + errors.PerFileErrorRecorder } func NewBackupHandle( @@ -79,21 +80,6 @@ func NewBackupHandle( } } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (fbh *FileBackupHandle) RecordError(err error) { - fbh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (fbh *FileBackupHandle) HasErrors() bool { - return fbh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. 
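// Editor's sketch (not part of the diff): how the PerFileErrorRecorder that now
// backs every BackupHandle behaves. The method names come from errors.go above;
// the filenames are made up for illustration.
package main

import (
	"errors"
	"fmt"

	backuperrors "vitess.io/vitess/go/vt/mysqlctl/errors"
)

func main() {
	var rec backuperrors.PerFileErrorRecorder // the zero value is ready to use

	rec.RecordError("0.ibd", errors.New("connection reset"))
	rec.RecordError("1.ibd", errors.New("timeout"))
	rec.RecordError("1.ibd", errors.New("timeout again")) // grouped under "1.ibd"
	rec.RecordError("2.ibd", nil)                         // nil errors are ignored

	fmt.Println(rec.HasErrors())      // true
	fmt.Println(rec.GetFailedFiles()) // "0.ibd" and "1.ibd", order unspecified

	// After "1.ibd" is retried successfully its errors can be dropped, so that
	// only files that are still broken surface in Error().
	rec.ResetErrorForFile("1.ibd")
	fmt.Println(rec.Error()) // connection reset
}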
-func (fbh *FileBackupHandle) Error() error { - return fbh.errors.Error() -} - // Directory is part of the BackupHandle interface func (fbh *FileBackupHandle) Directory() string { return fbh.dir diff --git a/go/vt/mysqlctl/gcsbackupstorage/gcs.go b/go/vt/mysqlctl/gcsbackupstorage/gcs.go index 814395a225a..adecbb9bbba 100644 --- a/go/vt/mysqlctl/gcsbackupstorage/gcs.go +++ b/go/vt/mysqlctl/gcsbackupstorage/gcs.go @@ -32,8 +32,9 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" ) @@ -65,22 +66,7 @@ type GCSBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder -} - -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) Error() error { - return bh.errors.Error() + errors.PerFileErrorRecorder } // Directory implements BackupHandle. diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go index b3a8117aafa..97861e83729 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3.go +++ b/go/vt/mysqlctl/s3backupstorage/s3.go @@ -40,6 +40,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -48,7 +49,8 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" + errorsbackup "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/vt/log" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" @@ -144,8 +146,8 @@ type S3BackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder waitGroup sync.WaitGroup + errorsbackup.PerFileErrorRecorder } // Directory is part of the backupstorage.BackupHandle interface. @@ -158,39 +160,23 @@ func (bh *S3BackupHandle) Name() string { return bh.name } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) Error() error { - return bh.errors.Error() -} - // AddFile is part of the backupstorage.BackupHandle interface. 
func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { if bh.readOnly { return nil, fmt.Errorf("AddFile cannot be called on read-only backup") } - // Calculate s3 upload part size using the source filesize - partSizeBytes := manager.DefaultUploadPartSize - if filesize > 0 { - minimumPartSize := float64(filesize) / float64(manager.MaxUploadParts) - // Round up to ensure large enough partsize - calculatedPartSizeBytes := int64(math.Ceil(minimumPartSize)) - if calculatedPartSizeBytes > partSizeBytes { - partSizeBytes = calculatedPartSizeBytes - } - } + partSizeBytes := calculateUploadPartSize(filesize) reader, writer := io.Pipe() + bh.handleAddFile(ctx, filename, partSizeBytes, reader, func(err error) { + reader.CloseWithError(err) + }) + + return writer, nil +} + +func (bh *S3BackupHandle) handleAddFile(ctx context.Context, filename string, partSizeBytes int64, reader io.Reader, closer func(error)) { bh.waitGroup.Add(1) go func() { @@ -221,12 +207,24 @@ func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize }) }) if err != nil { - reader.CloseWithError(err) - bh.RecordError(err) + closer(err) + bh.RecordError(filename, err) } }() +} - return writer, nil +func calculateUploadPartSize(filesize int64) int64 { + // Calculate s3 upload part size using the source filesize + partSizeBytes := manager.DefaultUploadPartSize + if filesize > 0 { + minimumPartSize := float64(filesize) / float64(manager.MaxUploadParts) + // Round up to ensure large enough partsize + calculatedPartSizeBytes := int64(math.Ceil(minimumPartSize)) + if calculatedPartSizeBytes > partSizeBytes { + partSizeBytes = calculatedPartSizeBytes + } + } + return partSizeBytes } // EndBackup is part of the backupstorage.BackupHandle interface. @@ -505,13 +503,24 @@ func (bs *S3BackupStorage) client() (*s3.Client, error) { return nil, err } - bs._client = s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = forcePath - if retryCount >= 0 { - o.RetryMaxAttempts = retryCount - o.Retryer = &ClosedConnectionRetryer{} - } - }, s3.WithEndpointResolverV2(newEndpointResolver())) + options := []func(options *s3.Options){ + func(o *s3.Options) { + o.UsePathStyle = forcePath + if retryCount >= 0 { + o.RetryMaxAttempts = retryCount + o.Retryer = &ClosedConnectionRetryer{ + awsRetryer: retry.NewStandard(func(options *retry.StandardOptions) { + options.MaxAttempts = retryCount + }), + } + } + }, + } + if endpoint != "" { + options = append(options, s3.WithEndpointResolverV2(newEndpointResolver())) + } + + bs._client = s3.NewFromConfig(cfg, options...) if len(bucket) == 0 { return nil, fmt.Errorf("--s3_backup_storage_bucket required") diff --git a/go/vt/mysqlctl/s3backupstorage/s3_mock.go b/go/vt/mysqlctl/s3backupstorage/s3_mock.go new file mode 100644 index 00000000000..f244c4d63b1 --- /dev/null +++ b/go/vt/mysqlctl/s3backupstorage/s3_mock.go @@ -0,0 +1,223 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
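Editor's note: a worked example of the part-size arithmetic in the calculateUploadPartSize helper extracted above. This standalone sketch inlines the aws-sdk-go-v2 manager constants (DefaultUploadPartSize is 5 MiB, MaxUploadParts is 10000); everything else mirrors the helper:

package main

import (
	"fmt"
	"math"
)

// Mirrors manager.DefaultUploadPartSize and manager.MaxUploadParts from
// github.com/aws/aws-sdk-go-v2/feature/s3/manager.
const (
	defaultUploadPartSize int64 = 5 * 1024 * 1024 // 5 MiB
	maxUploadParts              = 10000
)

// Same logic as calculateUploadPartSize in s3.go.
func calculateUploadPartSize(filesize int64) int64 {
	partSizeBytes := defaultUploadPartSize
	if filesize > 0 {
		minimumPartSize := float64(filesize) / float64(maxUploadParts)
		// Round up to ensure the whole file fits in at most maxUploadParts parts.
		if calculated := int64(math.Ceil(minimumPartSize)); calculated > partSizeBytes {
			partSizeBytes = calculated
		}
	}
	return partSizeBytes
}

func main() {
	// A 1 GiB file fits in 10000 default-sized parts, so the 5 MiB default is kept.
	fmt.Println(calculateUploadPartSize(1 << 30)) // 5242880

	// A 100 GiB file needs bigger parts: ceil(107374182400 / 10000) bytes, about 10.24 MiB.
	fmt.Println(calculateUploadPartSize(100 << 30)) // 10737419
}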
+*/ + +package s3backupstorage + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" +) + +type FakeS3BackupHandle struct { + *S3BackupHandle + + AddFileReturnF func(s3 *S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error) + ReadFileReturnF func(s3 *S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error) + + mu sync.Mutex + addPerFile map[string]int + readPerFile map[string]int +} + +type FakeConfig struct { + Region string + Endpoint string + Bucket string + ForcePath bool +} + +func InitFlag(cfg FakeConfig) { + region = cfg.Region + endpoint = cfg.Endpoint + bucket = cfg.Bucket + forcePath = cfg.ForcePath +} + +func NewFakeS3BackupHandle(ctx context.Context, dir, name string, logger logutil.Logger, stats backupstats.Stats) (*FakeS3BackupHandle, error) { + s := newS3BackupStorage() + bs := s.WithParams(backupstorage.Params{ + Logger: logger, + Stats: stats, + }) + bh, err := bs.StartBackup(ctx, dir, name) + if err != nil { + return nil, err + } + return &FakeS3BackupHandle{ + S3BackupHandle: bh.(*S3BackupHandle), + addPerFile: make(map[string]int), + readPerFile: make(map[string]int), + }, nil +} + +func NewFakeS3RestoreHandle(ctx context.Context, dir string, logger logutil.Logger, stats backupstats.Stats) (*FakeS3BackupHandle, error) { + s := newS3BackupStorage() + bs := s.WithParams(backupstorage.Params{ + Logger: logger, + Stats: stats, + }) + bhs, err := bs.ListBackups(ctx, dir) + if err != nil { + return nil, err + } + return &FakeS3BackupHandle{ + S3BackupHandle: bhs[0].(*S3BackupHandle), + addPerFile: make(map[string]int), + readPerFile: make(map[string]int), + }, nil +} + +func (fbh *FakeS3BackupHandle) Directory() string { + return fbh.S3BackupHandle.Directory() +} + +func (fbh *FakeS3BackupHandle) Name() string { + return fbh.S3BackupHandle.Name() +} + +func (fbh *FakeS3BackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { + fbh.mu.Lock() + defer func() { + fbh.addPerFile[filename] += 1 + fbh.mu.Unlock() + }() + + if fbh.AddFileReturnF != nil { + return fbh.AddFileReturnF(fbh.S3BackupHandle, ctx, filename, filesize, fbh.addPerFile[filename] == 0) + } + return fbh.S3BackupHandle.AddFile(ctx, filename, filesize) +} + +func (fbh *FakeS3BackupHandle) EndBackup(ctx context.Context) error { + return fbh.S3BackupHandle.EndBackup(ctx) +} + +func (fbh *FakeS3BackupHandle) AbortBackup(ctx context.Context) error { + return fbh.S3BackupHandle.AbortBackup(ctx) +} + +func (fbh *FakeS3BackupHandle) ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) { + fbh.mu.Lock() + defer func() { + fbh.readPerFile[filename] += 1 + fbh.mu.Unlock() + }() + + if fbh.ReadFileReturnF != nil { + return fbh.ReadFileReturnF(fbh.S3BackupHandle, ctx, filename, fbh.readPerFile[filename] == 0) + } + return fbh.S3BackupHandle.ReadFile(ctx, filename) +} + +func (fbh *FakeS3BackupHandle) RecordError(s string, err error) { + fbh.S3BackupHandle.RecordError(s, err) +} + +func (fbh *FakeS3BackupHandle) HasErrors() bool { + return fbh.S3BackupHandle.HasErrors() +} + +func (fbh *FakeS3BackupHandle) Error() error { + return fbh.S3BackupHandle.Error() +} + +func (fbh *FakeS3BackupHandle) GetFailedFiles() []string { + return fbh.S3BackupHandle.GetFailedFiles() +} + +func (fbh *FakeS3BackupHandle) ResetErrorForFile(s string) { + 
fbh.S3BackupHandle.ResetErrorForFile(s) +} + +type failReadPipeReader struct { + *io.PipeReader +} + +func (fwr *failReadPipeReader) Read(p []byte) (n int, err error) { + return 0, errors.New("failing read") +} + +func FailFirstWrite(s3bh *S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error) { + if s3bh.readOnly { + return nil, fmt.Errorf("AddFile cannot be called on read-only backup") + } + + partSizeBytes := calculateUploadPartSize(filesize) + reader, writer := io.Pipe() + r := io.Reader(reader) + + if firstAdd { + r = &failReadPipeReader{PipeReader: reader} + } + + s3bh.handleAddFile(ctx, filename, partSizeBytes, r, func(err error) { + reader.CloseWithError(err) + }) + return writer, nil +} + +func FailAllWrites(s3bh *S3BackupHandle, ctx context.Context, filename string, filesize int64, _ bool) (io.WriteCloser, error) { + if s3bh.readOnly { + return nil, fmt.Errorf("AddFile cannot be called on read-only backup") + } + + partSizeBytes := calculateUploadPartSize(filesize) + reader, writer := io.Pipe() + r := &failReadPipeReader{PipeReader: reader} + + s3bh.handleAddFile(ctx, filename, partSizeBytes, r, func(err error) { + r.PipeReader.CloseWithError(err) + }) + return writer, nil +} + +type failRead struct{} + +func (fr *failRead) Read(p []byte) (n int, err error) { + return 0, errors.New("failing read") +} + +func (fr *failRead) Close() error { + return nil +} + +func FailFirstRead(s3bh *S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error) { + rc, err := s3bh.ReadFile(ctx, filename) + if err != nil { + return nil, err + } + if firstRead { + return &failRead{}, nil + } + return rc, nil +} + +// FailAllReadExpectManifest is used to fail every attempt at reading a file from S3. +// Only the MANIFEST file is allowed to be read, because otherwise we wouldn't even try to read the normal files. +func FailAllReadExpectManifest(s3bh *S3BackupHandle, ctx context.Context, filename string, _ bool) (io.ReadCloser, error) { + const manifestFileName = "MANIFEST" + if filename == manifestFileName { + return s3bh.ReadFile(ctx, filename) + } + return &failRead{}, nil +}
diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 937dffe70b3..4fc34992b0f 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go
@@ -1135,30 +1135,45 @@ func (ts *trafficSwitcher) switchDeniedTables(ctx context.Context) error { return nil } +// cancelMigration attempts to revert all changes made during the migration so that we can get back to the +// state when traffic switching (or reversing) was initiated. func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { var err error + + if ctx.Err() != nil { + // Even though we create a new context later on, we still record any context error + // for forensics, in case of failures. + ts.Logger().Infof("In Cancel migration: original context invalid: %s", ctx.Err()) + } + + // We create a new context while canceling the migration, so that we are independent of the original + // context being cancelled prior to or during the cancel operation.
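// Editor's sketch (not part of the diff): the detached-context pattern the two
// comments above describe. "cleanup" is a hypothetical stand-in for
// cancelMigration; the 60s timeout mirrors cmTimeout below.
package main

import (
	"context"
	"fmt"
	"time"
)

func cleanup(ctx context.Context) {
	// Record the caller's context error for forensics, but do not obey it.
	if ctx.Err() != nil {
		fmt.Println("original context invalid:", ctx.Err())
	}

	// A fresh, bounded context keeps the revert work alive even though the
	// caller's context is already dead, while still guaranteeing termination.
	cmCtx, cmCancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cmCancel()

	fmt.Println("cleanup context alive:", cmCtx.Err() == nil) // true
}

func main() {
	callerCtx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the original context being cancelled mid-migration
	cleanup(callerCtx)
}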
+ cmTimeout := 60 * time.Second + cmCtx, cmCancel := context.WithTimeout(context.Background(), cmTimeout) + defer cmCancel() + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { - err = ts.switchDeniedTables(ctx) + err = ts.switchDeniedTables(cmCtx) } else { - err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) + err = ts.changeShardsAccess(cmCtx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) } if err != nil { - ts.Logger().Errorf("Cancel migration failed: %v", err) + ts.Logger().Errorf("Cancel migration failed: could not revert denied tables / shard access: %v", err) } - sm.CancelStreamMigrations(ctx) + sm.CancelStreamMigrations(cmCtx) err = ts.ForAllTargets(func(target *MigrationTarget) error { query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) - _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) + _, err := ts.TabletManagerClient().VReplicationExec(cmCtx, target.GetPrimary().Tablet, query) return err }) if err != nil { ts.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) } - err = ts.deleteReverseVReplication(ctx) + err = ts.deleteReverseVReplication(cmCtx) if err != nil { ts.Logger().Errorf("Cancel migration failed: could not delete reverse vreplication streams: %v", err) }
diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go index dfcad4e5e6b..e8e763c1ee1 100644 --- a/go/vt/vtgate/engine/plan_description.go +++ b/go/vt/vtgate/engine/plan_description.go
@@ -126,6 +126,133 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } +// PrimitiveDescriptionFromString creates a primitive description from a data string. +func PrimitiveDescriptionFromString(data string) (pd PrimitiveDescription, err error) { + resultMap := make(map[string]any) + err = json.Unmarshal([]byte(data), &resultMap) + if err != nil { + return PrimitiveDescription{}, err + } + return PrimitiveDescriptionFromMap(resultMap) +} + +// PrimitiveDescriptionFromMap populates the fields of a PrimitiveDescription from a map representation.
+func PrimitiveDescriptionFromMap(data map[string]any) (pd PrimitiveDescription, err error) { + if opType, isPresent := data["OperatorType"]; isPresent { + pd.OperatorType = opType.(string) + } + if variant, isPresent := data["Variant"]; isPresent { + pd.Variant = variant.(string) + } + if ksMap, isPresent := data["Keyspace"]; isPresent { + ksMap := ksMap.(map[string]any) + pd.Keyspace = &vindexes.Keyspace{ + Name: ksMap["Name"].(string), + Sharded: ksMap["Sharded"].(bool), + } + } + if ttt, isPresent := data["TargetTabletType"]; isPresent { + pd.TargetTabletType = topodatapb.TabletType(ttt.(int)) + } + if other, isPresent := data["Other"]; isPresent { + pd.Other = other.(map[string]any) + } + if inpName, isPresent := data["InputName"]; isPresent { + pd.InputName = inpName.(string) + } + if avgRows, isPresent := data["AvgNumberOfRows"]; isPresent { + pd.RowsReceived = RowsReceived{ + int(avgRows.(float64)), + } + } + if sq, isPresent := data["ShardsQueried"]; isPresent { + sq := int(sq.(float64)) + pd.ShardsQueried = (*ShardsQueried)(&sq) + } + if inputs, isPresent := data["Inputs"]; isPresent { + inputs := inputs.([]any) + for _, input := range inputs { + inputMap := input.(map[string]any) + inp, err := PrimitiveDescriptionFromMap(inputMap) + if err != nil { + return PrimitiveDescription{}, err + } + pd.Inputs = append(pd.Inputs, inp) + } + } + return pd, nil +} + +// WalkPrimitiveDescription walks the primitive description. +func WalkPrimitiveDescription(pd PrimitiveDescription, f func(PrimitiveDescription)) { + f(pd) + for _, child := range pd.Inputs { + WalkPrimitiveDescription(child, f) + } +} + +func (pd PrimitiveDescription) Equals(other PrimitiveDescription) string { + if pd.Variant != other.Variant { + return fmt.Sprintf("Variant: %v != %v", pd.Variant, other.Variant) + } + + if pd.OperatorType != other.OperatorType { + return fmt.Sprintf("OperatorType: %v != %v", pd.OperatorType, other.OperatorType) + } + + // TODO (harshit): enable this to compare keyspace as well + // switch { + // case pd.Keyspace == nil && other.Keyspace == nil: + // // do nothing + // case pd.Keyspace != nil && other.Keyspace != nil: + // if pd.Keyspace.Name != other.Keyspace.Name { + // return fmt.Sprintf("Keyspace.Name: %v != %v", pd.Keyspace.Name, other.Keyspace.Name) + // } + // default: + // return "Keyspace is nil in one of the descriptions" + // } + + switch { + case pd.TargetDestination == nil && other.TargetDestination == nil: + // do nothing + case pd.TargetDestination != nil && other.TargetDestination != nil: + if pd.TargetDestination.String() != other.TargetDestination.String() { + return fmt.Sprintf("TargetDestination: %v != %v", pd.TargetDestination, other.TargetDestination) + } + default: + return "TargetDestination is nil in one of the descriptions" + } + + if pd.TargetTabletType != other.TargetTabletType { + return fmt.Sprintf("TargetTabletType: %v != %v", pd.TargetTabletType, other.TargetTabletType) + } + + switch { + case pd.Other == nil && other.Other == nil: + // do nothing + case pd.Other != nil && other.Other != nil: + if len(pd.Other) != len(other.Other) { + return fmt.Sprintf("Other length did not match: %v != %v", pd.Other, other.Other) + } + for ky, val := range pd.Other { + if other.Other[ky] != val { + return fmt.Sprintf("Other[%v]: %v != %v", ky, val, other.Other[ky]) + } + } + default: + return "Other is nil in one of the descriptions" + } + if len(pd.Inputs) != len(other.Inputs) { + return fmt.Sprintf("Inputs length did not match: %v != %v", len(pd.Inputs), len(other.Inputs)) 
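// Editor's aside (illustrative, not part of the diff): typical use of the
// helpers above when comparing a recorded plan against a live one. Equals
// returns "" on a match, or a human-readable first difference. Variant is
// checked before anything else, so for these two inputs it reports:
//
//	left, _ := PrimitiveDescriptionFromString(`{"OperatorType": "Route", "Variant": "Scatter"}`)
//	right, _ := PrimitiveDescriptionFromString(`{"OperatorType": "Route", "Variant": "EqualUnique"}`)
//	if diff := left.Equals(right); diff != "" {
//		fmt.Println(diff) // Variant: Scatter != EqualUnique
//	}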
+ } + for idx, input := range pd.Inputs { + if diff := input.Equals(other.Inputs[idx]); diff != "" { + return diff + } + } + return "" +} + func average(nums []int) float64 { total := 0 for _, num := range nums { diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index e84ab7fbb21..0bb47361f55 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -187,7 +187,6 @@ func NewExecutor( // setting the vcursor config. e.initVConfig(warnOnShardedOnly, pv) - vschemaacl.Init() // we subscribe to update from the VSchemaManager e.vm = &VSchemaManager{ subscriber: e.SaveVSchema, diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 62101639a11..f8ed0b558c3 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -401,9 +401,9 @@ func TestExecutorSetMetadata(t *testing.T) { }) t.Run("Session 2", func(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -411,11 +411,11 @@ func TestExecutorSetMetadata(t *testing.T) { set := "set @@vitess_metadata.app_keyspace_v1= '1'" _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) + require.NoError(t, err, "%s error: %v", set, err) show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` result, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) - assert.NoError(t, err) + require.NoError(t, err) want := "1" got := result.Rows[0][1].ToString() @@ -424,11 +424,11 @@ func TestExecutorSetMetadata(t *testing.T) { // Update metadata set = "set @@vitess_metadata.app_keyspace_v2='2'" _, err = executor.Execute(ctx, nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) + require.NoError(t, err, "%s error: %v", set, err) show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` gotqr, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) - assert.NoError(t, err) + require.NoError(t, err) wantqr := &sqltypes.Result{ Fields: buildVarCharFields("Key", "Value"), diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 2b6d4710bce..d3ab28d6600 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -335,9 +335,9 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { } func TestExecutorDeleteMetadata(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -1318,9 +1318,9 @@ func TestExecutorDDLFk(t *testing.T) { } func TestExecutorAlterVSchemaKeyspace(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -1347,9 +1347,9 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { } func TestExecutorCreateVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + 
vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -1417,9 +1417,9 @@ func TestExecutorCreateVindexDDL(t *testing.T) { } func TestExecutorAddDropVschemaTableDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -1486,8 +1486,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled - vschemaacl.AuthorizedDDLUsers = "%" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) @@ -1499,8 +1498,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { } // test when only one user is enabled - vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) @@ -1511,7 +1509,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { } // restore the disallowed state - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) } func TestExecutorUnrecognized(t *testing.T) { diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index 825b65ab8f3..1acc1ba2362 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -135,9 +135,9 @@ func waitForColVindexes(t *testing.T, ks, table string, names []string, executor } func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) @@ -163,9 +163,9 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { } func TestPlanExecutorCreateVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -205,9 +205,9 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { } func TestPlanExecutorDropVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx 
:= createExecutorEnv(t) ks := "TestExecutor" @@ -274,9 +274,9 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { } func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -331,9 +331,9 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { } func TestExecutorAddSequenceDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -391,9 +391,9 @@ func TestExecutorAddSequenceDDL(t *testing.T) { } func TestExecutorDropSequenceDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -442,9 +442,9 @@ func TestExecutorDropSequenceDDL(t *testing.T) { } func TestExecutorDropAutoIncDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -484,9 +484,9 @@ func TestExecutorDropAutoIncDDL(t *testing.T) { } func TestExecutorAddDropVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -747,8 +747,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled - vschemaacl.AuthorizedDDLUsers = "%" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) @@ -760,8 +759,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { } // test when only one user is enabled - vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) @@ -772,5 +770,5 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { } // restore the disallowed state - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index ccbc9821170..7135f4dff29 100644 --- 
a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -649,21 +649,12 @@ func createFkDefinition(childCols []string, parentTableName string, parentCols [ } } -type ( - planTest struct { - Comment string `json:"comment,omitempty"` - Query string `json:"query,omitempty"` - Plan json.RawMessage `json:"plan,omitempty"` - Skip bool `json:"skip,omitempty"` - } -) - func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchemaWrapper, render bool) { opts := jsondiff.DefaultConsoleOptions() s.T().Run(filename, func(t *testing.T) { failed := false - var expected []planTest + var expected []PlanTest for _, tcase := range readJSONTests(filename) { testName := tcase.Comment if testName == "" { @@ -672,9 +663,10 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem if tcase.Query == "" { continue } - current := planTest{ - Comment: testName, + current := PlanTest{ + Comment: tcase.Comment, Query: tcase.Query, + SkipE2E: tcase.SkipE2E, } vschema.Version = Gen4 out := getPlanOutput(tcase, vschema, render) @@ -720,8 +712,8 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem }) } -func readJSONTests(filename string) []planTest { - var output []planTest +func readJSONTests(filename string) []PlanTest { + var output []PlanTest file, err := os.Open(locateFile(filename)) if err != nil { panic(err) @@ -735,7 +727,7 @@ func readJSONTests(filename string) []planTest { return output } -func getPlanOutput(tcase planTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) { +func getPlanOutput(tcase PlanTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) { defer func() { if r := recover(); r != nil { out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack())) @@ -867,7 +859,7 @@ func BenchmarkBaselineVsMirrored(b *testing.B) { }) } -func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemawrapper.VSchemaWrapper) { +func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []PlanTest, vschema *vschemawrapper.VSchemaWrapper) { b.ReportAllocs() for n := 0; n < b.N; n++ { for _, tcase := range testCases { diff --git a/go/vt/vtgate/planbuilder/test_helper.go b/go/vt/vtgate/planbuilder/test_helper.go new file mode 100644 index 00000000000..25d6b7306d1 --- /dev/null +++ b/go/vt/vtgate/planbuilder/test_helper.go @@ -0,0 +1,27 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import "encoding/json" + +type PlanTest struct { + Comment string `json:"comment,omitempty"` + Query string `json:"query,omitempty"` + Plan json.RawMessage `json:"plan,omitempty"` + Skip bool `json:"skip,omitempty"` + SkipE2E bool `json:"skip_e2e,omitempty"` +} diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index 8b268e367dd..49a03a8f05a 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -940,19 +940,44 @@ "Table": "`user`, user_extra" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.`name` from music where 1 != 1", - "Query": "select music.`name` from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.`name` from music where 1 != 1", + "Query": "select music.`name` from music where music.id = :user_id", + "Table": "music" + } + ] } ] } @@ -2992,19 +3017,44 @@ ] }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.id = :u2_val2", - "Table": "music", "Values": [ ":u2_val2" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.id = :u2_val2", + "Table": "music" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 4194a369bd6..edce4ebd0cb 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -3404,19 +3404,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + 
"FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3430,19 +3455,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1 and column_c = 2", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3456,19 +3506,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3482,19 +3557,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_a = 3 and column_b = 1", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_a = 3 and column_b = 1", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + 
"Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_a = 3 and column_b = 1", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json index 2e0fe429c1f..bec64fd7b1e 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json @@ -4709,19 +4709,44 @@ ] }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */", - "Table": "music", "Values": [ "20" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */", + "Table": "music" + } + ] } ] }, diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json index 060f073a366..a35949cd4c1 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json @@ -318,19 +318,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", + "Table": "music" + } + ] } ] } @@ -379,19 +404,44 @@ "Vindex": "user_index" 
}, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", + "Table": "music" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json index 36f1472007d..6a8e94c0241 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json @@ -544,19 +544,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, @@ -597,19 +622,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + 
"Table": "music" + } + ] } ] }, @@ -650,19 +700,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, @@ -770,19 +845,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/main.sql b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql new file mode 100644 index 00000000000..8c15b99218c --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql @@ -0,0 +1,12 @@ +CREATE TABLE `unsharded` ( + `id` INT NOT NULL PRIMARY KEY, + `col1` VARCHAR(255) DEFAULT NULL, + `col2` VARCHAR(255) DEFAULT NULL, + `name` VARCHAR(255) DEFAULT NULL +); + +CREATE TABLE `unsharded_auto` ( + `id` INT NOT NULL PRIMARY KEY, + `col1` VARCHAR(255) DEFAULT NULL, + `col2` VARCHAR(255) DEFAULT NULL +); \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/user.sql b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql new file mode 100644 index 00000000000..55f4078557a --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql @@ -0,0 +1,100 @@ +CREATE TABLE user +( + id INT PRIMARY KEY, + col BIGINT, + predef1 VARCHAR(255), + predef2 VARCHAR(255), + textcol1 VARCHAR(255), + intcol BIGINT, + textcol2 VARCHAR(255) +); + +CREATE TABLE user_metadata +( + user_id INT, + email VARCHAR(255), + address VARCHAR(255), + md5 VARCHAR(255), + non_planable VARCHAR(255), + PRIMARY KEY (user_id) +); + +CREATE TABLE music +( + user_id INT, + id INT, + PRIMARY KEY (user_id) +); + +CREATE TABLE samecolvin +( + col VARCHAR(255), + PRIMARY KEY (col) +); + +CREATE TABLE 
+CREATE TABLE user
+(
+    id INT PRIMARY KEY,
+    col BIGINT,
+    predef1 VARCHAR(255),
+    predef2 VARCHAR(255),
+    textcol1 VARCHAR(255),
+    intcol BIGINT,
+    textcol2 VARCHAR(255)
+);
+
+CREATE TABLE user_metadata
+(
+    user_id INT,
+    email VARCHAR(255),
+    address VARCHAR(255),
+    md5 VARCHAR(255),
+    non_planable VARCHAR(255),
+    PRIMARY KEY (user_id)
+);
+
+CREATE TABLE music
+(
+    user_id INT,
+    id INT,
+    PRIMARY KEY (user_id)
+);
+
+CREATE TABLE samecolvin
+(
+    col VARCHAR(255),
+    PRIMARY KEY (col)
+);
+
+CREATE TABLE multicolvin
+(
+    kid INT,
+    column_a VARCHAR(255),
+    column_b VARCHAR(255),
+    column_c VARCHAR(255),
+    PRIMARY KEY (kid)
+);
+
+CREATE TABLE customer
+(
+    id INT,
+    email VARCHAR(255),
+    phone VARCHAR(255),
+    PRIMARY KEY (id)
+);
+
+CREATE TABLE multicol_tbl
+(
+    cola VARCHAR(255),
+    colb VARCHAR(255),
+    colc VARCHAR(255),
+    name VARCHAR(255),
+    PRIMARY KEY (cola, colb)
+);
+
+CREATE TABLE mixed_tbl
+(
+    shard_key VARCHAR(255),
+    lkp_key VARCHAR(255),
+    PRIMARY KEY (shard_key)
+);
+
+CREATE TABLE pin_test
+(
+    id INT PRIMARY KEY
+);
+
+CREATE TABLE cfc_vindex_col
+(
+    c1 VARCHAR(255),
+    c2 VARCHAR(255),
+    PRIMARY KEY (c1)
+);
+
+CREATE TABLE unq_lkp_idx
+(
+    unq_key INT PRIMARY KEY,
+    keyspace_id VARCHAR(255)
+);
+
+CREATE TABLE t1
+(
+    c1 INT,
+    c2 INT,
+    c3 INT,
+    PRIMARY KEY (c1)
+);
+
+CREATE TABLE authoritative
+(
+    user_id bigint NOT NULL,
+    col1 VARCHAR(255),
+    col2 bigint,
+    PRIMARY KEY (user_id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
index ab69df2cc47..eac13216380 100644
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -92,7 +92,8 @@
         "user.user",
         "user.user_metadata"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select with timeout directive sets QueryTimeout in the route",
@@ -197,7 +198,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select with partial scatter directive",
@@ -402,7 +404,8 @@
   {
     "comment": "test table lookup failure for authoritative code path",
     "query": "select a.* from authoritative",
-    "plan": "Unknown table 'a'"
+    "plan": "Unknown table 'a'",
+    "skip_e2e": true
   },
   {
     "comment": "select * from qualified authoritative table",
@@ -470,7 +473,8 @@
         "user.authoritative",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "auto-resolve anonymous columns for simple route",
@@ -493,7 +497,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "json_arrayagg in single sharded query",
@@ -519,7 +524,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "json_objectagg in single sharded query",
@@ -545,17 +551,20 @@
       "TablesUsed": [
        "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "unsupported json aggregation expressions in scatter query",
     "query": "select count(1) from user where cola = 'abc' group by n_id having json_arrayagg(a_id) = '[]'",
-    "plan": "VT12001: unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'"
+    "plan": "VT12001: unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'",
+    "skip_e2e": true
   },
   {
     "comment": "Cannot auto-resolve for cross-shard joins",
     "query": "select col from user join user_extra",
-    "plan": "Column 'col' in field list is ambiguous"
+    "plan": "Column 'col' in field list is ambiguous",
+    "skip_e2e": true
   },
   {
     "comment": "Auto-resolve should work if unique vindex columns are referenced",
@@ -597,7 +606,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "database calls should be substituted",
@@ -619,7 +629,8 @@
       "TablesUsed": [
         "main.dual"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "last_insert_id for unsharded route",
@@ -641,7 +652,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select from dual on unqualified keyspace",
@@ -694,7 +706,8 @@
   {
     "comment": "prefixing dual with a keyspace should not work",
     "query": "select 1 from user.dual",
-    "plan": "table dual not found"
+    "plan": "table dual not found",
+    "skip_e2e": true
   },
   {
     "comment": "RHS route referenced",
@@ -736,7 +749,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Both routes referenced",
@@ -778,7 +792,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Expression with single-route reference",
@@ -820,7 +835,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "subquery with an aggregation in order by that can be merged into a single route",
@@ -847,7 +863,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Jumbled references",
@@ -889,7 +906,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Comments",
@@ -931,7 +949,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "for update",
@@ -973,7 +992,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Field query should work for joins select bind vars",
@@ -1018,7 +1038,8 @@
         "main.unsharded",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Case preservation",
@@ -1060,12 +1081,14 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "syntax error",
     "query": "the quick brown fox",
-    "plan": "syntax error at position 4 near 'the'"
+    "plan": "syntax error at position 4 near 'the'",
+    "skip_e2e": true
   },
   {
     "comment": "Hex number is not treated as a simple value",
@@ -1113,7 +1136,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Selection but make the planner explicitly use a vindex",
@@ -1164,12 +1188,14 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Vindex hint on a non-existing vindex",
     "query": "select * from user use vindex (does_not_exist) where id = 1",
-    "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'"
+    "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'",
+    "skip_e2e": true
   },
   {
     "comment": "sharded limit offset",
@@ -1231,7 +1257,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Sharding Key Condition in Parenthesis",
@@ -1257,7 +1284,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Multiple parenthesized expressions",
@@ -1283,7 +1311,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Multiple parenthesized expressions",
@@ -1309,7 +1338,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Column Aliasing with Table.Column",
@@ -1387,7 +1417,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Column as boolean-ish",
@@ -1413,7 +1444,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "PK as fake boolean, and column as boolean-ish",
@@ -1439,7 +1471,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "top level subquery in select",
@@ -1484,7 +1517,8 @@
         "main.unsharded",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "sub-expression subquery in select",
@@ -1529,7 +1563,8 @@
         "main.unsharded",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select * from derived table expands specific columns",
@@ -1571,17 +1606,20 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "duplicate columns not allowed in derived table",
     "query": "select * from (select user.id, user_extra.id from user join user_extra) as t",
-    "plan": "Duplicate column name 'id'"
+    "plan": "Duplicate column name 'id'",
+    "skip_e2e": true
   },
   {
     "comment": "non-existent symbol in cross-shard derived table",
     "query": "select t.col from (select user.id from user join user_extra) as t",
-    "plan": "column 't.col' not found"
+    "plan": "column 't.col' not found",
+    "skip_e2e": true
   },
   {
     "comment": "union with the same target shard",
@@ -1608,7 +1646,8 @@
         "user.music",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "union with the same target shard last_insert_id",
@@ -1635,7 +1674,8 @@
         "user.music",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "unsharded union in derived table",
@@ -1793,7 +1833,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "routing table on music",
@@ -1815,7 +1856,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "testing SingleRow Projection",
@@ -1962,7 +2004,8 @@
         "main.unsharded_a",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Complex expression in a subquery used in NOT IN clause of an aggregate query",
@@ -2015,7 +2058,8 @@
         "main.unsharded_a",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "testing SingleRow Projection with arithmetics",
@@ -2218,12 +2262,14 @@
   {
     "comment": "sql_calc_found_rows in sub queries",
     "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)",
-    "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+    "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+    "skip_e2e": true
   },
   {
     "comment": "sql_calc_found_rows in derived table",
     "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1",
-    "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+    "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+    "skip_e2e": true
   },
   {
     "comment": "select from unsharded keyspace into dumpfile",
@@ -2245,7 +2291,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select from unsharded keyspace into outfile",
@@ -2267,7 +2314,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select from unsharded keyspace into outfile s3",
@@ -2289,7 +2337,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "left join with a dual table on left - merge-able",
@@ -2500,17 +2549,20 @@
   {
     "comment": "Union after into outfile is incorrect",
     "query": "select id from user into outfile 'out_file_name' union all select id from music",
-    "plan": "syntax error at position 55 near 'union'"
+    "plan": "syntax error at position 55 near 'union'",
+    "skip_e2e": true
   },
   {
     "comment": "Into outfile s3 in derived table is incorrect",
     "query": "select id from (select id from user into outfile s3 'inner_outfile') as t2",
-    "plan": "syntax error at position 41 near 'into'"
+    "plan": "syntax error at position 41 near 'into'",
+    "skip_e2e": true
   },
   {
     "comment": "Into outfile s3 in derived table with union incorrect",
     "query": "select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2",
-    "plan": "syntax error at position 41 near 'into'"
+    "plan": "syntax error at position 41 near 'into'",
+    "skip_e2e": true
   },
   {
     "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
@@ -2579,7 +2631,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "((((select 1))))",
@@ -2624,7 +2677,8 @@
         "main.dual",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "subquery in select expression of derived table",
@@ -2694,7 +2748,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "ORDER BY subquery",
@@ -2764,7 +2819,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "plan test for a natural character set string",
@@ -2831,7 +2887,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Straight Join ensures specific ordering of joins",
@@ -2876,7 +2933,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Dual query should be handled on the vtgate even with a LIMIT",
@@ -2950,7 +3008,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Straight Join preserved in MySQL query",
@@ -2973,7 +3032,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "correlated subquery in exists clause",
@@ -3031,7 +3091,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "correlated subquery in exists clause with an order by",
@@ -3090,7 +3151,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "correlated subquery having dependencies on two tables",
@@ -3163,7 +3225,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "correlated subquery using a column twice",
@@ -3220,7 +3283,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "correlated subquery that is dependent on one side of a join, fully mergeable",
@@ -3271,7 +3335,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "union as a derived table",
@@ -3360,7 +3425,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "mergeable derived table with order by and limit",
@@ -3382,7 +3448,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "mergeable derived table with group by and limit",
@@ -3404,7 +3471,8 @@
       "TablesUsed": [
         "main.unsharded"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select user.id, trim(leading 'x' from user.name) from user",
@@ -3426,7 +3494,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "json utility functions",
@@ -3448,7 +3517,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "dual query with exists clause",
@@ -3546,7 +3616,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "yeah, it does not make sense, but it's valid",
@@ -3639,7 +3710,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "groupe by with non aggregated columns and table alias",
@@ -3661,7 +3733,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Functions that return JSON value attributes",
@@ -3866,7 +3939,8 @@
       "TablesUsed": [
         "user.user"
      ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "gtid functions",
@@ -3934,7 +4008,8 @@
         "user.user_extra",
         "user.user_metadata"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates",
@@ -3962,7 +4037,8 @@
         "user.music_extra",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "SQL_CALC_FOUND_ROWS with vindex lookup",
@@ -4073,7 +4149,8 @@
       "TablesUsed": [
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "`None` route being merged with another route via join predicate on Vindex columns",
@@ -4122,7 +4199,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
@@ -4200,7 +4278,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
@@ -4314,7 +4393,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column",
@@ -4336,7 +4416,8 @@
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column",
@@ -4375,26 +4456,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
           }
         ]
       },
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Unmergeable scatter subquery with LIMIT",
@@ -4430,26 +4537,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
          }
        ]
      },
      "TablesUsed": [
        "user.music"
      ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex",
@@ -4483,26 +4616,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
           }
         ]
       },
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Unmergeable subquery with `MAX` aggregate",
@@ -4543,19 +4702,44 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
           }
         ]
       },
@@ -4596,19 +4780,44 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
           }
         ]
       },
@@ -4649,26 +4858,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
           }
         ]
       },
       "TablesUsed": [
         "user.music"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Mergeable subquery with multiple levels of derived statements",
@@ -4760,26 +4995,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
          }
        ]
      },
      "TablesUsed": [
        "user.music"
      ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Unmergeable subquery with multiple levels of derived statements",
@@ -4815,26 +5076,52 @@
           },
           {
             "InputName": "Outer",
-            "OperatorType": "Route",
+            "OperatorType": "VindexLookup",
             "Variant": "IN",
             "Keyspace": {
               "Name": "user",
               "Sharded": true
             },
-            "FieldQuery": "select music.id from music where 1 != 1",
-            "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
-            "Table": "music",
             "Values": [
               "::__sq1"
             ],
-            "Vindex": "music_user_map"
+            "Vindex": "music_user_map",
+            "Inputs": [
+              {
+                "OperatorType": "Route",
+                "Variant": "IN",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+                "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+                "Table": "name_user_vdx",
+                "Values": [
+                  "::name"
+                ],
+                "Vindex": "user_index"
+              },
+              {
+                "OperatorType": "Route",
+                "Variant": "ByDestination",
+                "Keyspace": {
+                  "Name": "user",
+                  "Sharded": true
+                },
+                "FieldQuery": "select music.id from music where 1 != 1",
+                "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+                "Table": "music"
+              }
+            ]
          }
        ]
      },
      "TablesUsed": [
        "user.music"
      ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge",
@@ -5033,7 +5320,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
@@ -5097,7 +5385,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
@@ -5220,7 +5509,8 @@
         "main.dual",
         "main.unsharded_a"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "subquery having join table on clause, using column reference of outer select table",
@@ -5269,7 +5559,8 @@
         "main.unsharded",
         "main.unsharded_a"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "ALL modifier on unsharded table works well",
@@ -5292,7 +5583,8 @@
         "main.unsharded",
         "main.unsharded_a"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "allow last_insert_id with argument",
@@ -5337,7 +5629,8 @@
         "user.music_extra",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Query with non-plannable lookup vindex",
@@ -5363,7 +5656,8 @@
       "TablesUsed": [
         "user.user_metadata"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "join query with lookup and join on different vindex column",
@@ -5415,7 +5709,8 @@
         "user.user",
         "user.user_metadata"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "pick email as vindex lookup",
@@ -5425,7 +5720,7 @@
     "Original": "select * from customer where email = 'a@mail.com'",
     "Instructions": {
       "OperatorType": "VindexLookup",
-      "Variant": "Equal",
+      "Variant": "EqualUnique",
       "Keyspace": {
         "Name": "user",
         "Sharded": true
@@ -5510,7 +5805,8 @@
       "TablesUsed": [
         "user.customer"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored",
@@ -5520,7 +5816,7 @@
     "Original": "select * from customer where email = 'a@mail.com' and phone = 123456",
     "Instructions": {
       "OperatorType": "VindexLookup",
-      "Variant": "Equal",
+      "Variant": "EqualUnique",
       "Keyspace": {
         "Name": "user",
         "Sharded": true
@@ -5571,7 +5867,7 @@
     "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'",
     "Instructions": {
       "OperatorType": "VindexLookup",
-      "Variant": "Equal",
+      "Variant": "EqualUnique",
       "Keyspace": {
         "Name": "user",
         "Sharded": true
@@ -5634,7 +5930,8 @@
       "TablesUsed": [
         "user.samecolvin"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "column with qualifier is correctly used",
@@ -5677,7 +5974,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Derived tables going to a single shard still need to expand derived table columns",
@@ -5722,7 +6020,8 @@
         "main.unsharded",
         "user.user"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "column name aliases in outer join queries",
@@ -5777,7 +6076,8 @@
         "user.user",
         "user.user_extra"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "Over clause works for unsharded tables",
@@ -5799,7 +6099,8 @@
       "TablesUsed": [
         "main.unsharded_a"
       ]
-    }
+    },
+    "skip_e2e": true
   },
   {
     "comment": "join with derived table with alias and join condition - merge into route",
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json
index 7feabb0a698..2927c1c6093 100644
--- a/go/vt/vtgate/planbuilder/testdata/union_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json
@@ -447,34 +447,84 @@
       "OperatorType": "Concatenate",
       "Inputs": [
         {
-          "OperatorType": "Route",
+          "OperatorType": "VindexLookup",
           "Variant": "EqualUnique",
           "Keyspace": {
             "Name": "user",
             "Sharded": true
           },
-          "FieldQuery": "select 1 from music where 1 != 1",
-          "Query": "select distinct 1 from music where id = 1",
-          "Table": "music",
           "Values": [
             "1"
           ],
-          "Vindex": "music_user_map"
+          "Vindex": "music_user_map",
+          "Inputs": [
+            {
+              "OperatorType": "Route",
+              "Variant": "IN",
+              "Keyspace": {
+                "Name": "user",
+                "Sharded": true
+              },
+              "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+              "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+              "Table": "name_user_vdx",
+              "Values": [
+                "::name"
+              ],
+              "Vindex": "user_index"
+            },
+            {
+              "OperatorType": "Route",
+              "Variant": "ByDestination",
+              "Keyspace": {
+                "Name": "user",
+                "Sharded": true
+              },
+              "FieldQuery": "select 1 from music where 1 != 1",
+              "Query": "select distinct 1 from music where id = 1",
+              "Table": "music"
+            }
+          ]
         },
         {
-          "OperatorType": "Route",
+          "OperatorType": "VindexLookup",
           "Variant": "EqualUnique",
           "Keyspace": {
             "Name": "user",
             "Sharded": true
          },
-          "FieldQuery": "select 1 from music where 1 != 1",
-          "Query": "select distinct 1 from music where id = 2",
-          "Table": "music",
           "Values": [
             "2"
           ],
-          "Vindex": "music_user_map"
+          "Vindex": "music_user_map",
+          "Inputs": [
+            {
+              "OperatorType": "Route",
+              "Variant": "IN",
+              "Keyspace": {
+                "Name": "user",
+                "Sharded": true
+              },
+              "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+              "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+              "Table": "name_user_vdx",
+              "Values": [
+                "::name"
+              ],
+              "Vindex": "user_index"
+            },
+            {
+              "OperatorType": "Route",
+              "Variant": "ByDestination",
+              "Keyspace": {
+                "Name": "user",
+                "Sharded": true
+              },
+              "FieldQuery": "select 1 from music where 1 != 1",
+              "Query": "select distinct 1 from music where id = 2",
+              "Table": "music"
+            }
+          ]
         }
       ]
     }
diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
index 4fe275f2398..a5de9d3697e 100644
--- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
@@ -58,34 +58,52 @@
     "sharded": true,
     "vindexes": {
       "user_index": {
-        "type": "hash_test",
+        "type": "hash",
         "owner": "user"
       },
       "kid_index": {
-        "type": "hash_test",
+        "type": "hash",
         "owner": "multicolvin"
       },
+      "hash": {
+        "type": "hash"
+      },
       "user_md5_index": {
         "type": "unicode_loose_md5"
       },
       "music_user_map": {
-        "type": "lookup_test",
-        "owner": "music"
+        "type": "lookup_unique",
+        "owner": "music",
+        "params": {
+          "table": "name_user_vdx",
+          "from": "name",
+          "to": "keyspace_id"
+        }
       },
       "cola_map": {
-        "type": "lookup_test",
-        "owner": "multicolvin"
+        "type": "lookup_unique",
+        "owner": "multicolvin",
+        "params": {
+          "table": "cola_map",
+          "from": "cola",
+          "to": "keyspace_id"
+        }
       },
       "colb_colc_map": {
-        "type": "lookup_test",
-        "owner": "multicolvin"
+        "type": "lookup_unique",
+        "owner": "multicolvin",
+        "params": {
+          "table": "colb_colc_map",
+          "from": "colb,colc",
+          "to": "keyspace_id"
+        }
       },
       "cola_kid_map": {
-        "type": "lookup_test",
+        "type": "lookup_unique",
         "owner": "overlap_vindex"
       },
       "name_user_map": {
-        "type": "name_lkp_test",
+        "type": "lookup",
         "owner": "user",
         "params": {
           "table": "name_user_vdx",
@@ -94,42 +112,56 @@
         }
       },
       "email_user_map": {
-        "type": "lookup_test",
+        "type": "lookup_unique",
         "owner": "user_metadata"
       },
       "address_user_map": {
-        "type": "lookup_test",
+        "type": "lookup_unique",
         "owner": "user_metadata"
       },
       "costly_map": {
-        "type": "costly",
-        "owner": "user"
+        "type": "lookup_cost",
+        "owner": "user",
+        "params": {
+          "table": "costly_map",
+          "from": "costly",
+          "to": "keyspace_id",
+          "cost": "100"
+        }
       },
       "hash_dup": {
-        "type": "hash_test",
+        "type": "hash",
         "owner": "user"
       },
       "vindex1": {
-        "type": "hash_test",
+        "type": "hash",
         "owner": "samecolvin"
       },
       "vindex2": {
-        "type": "lookup_test",
+        "type": "lookup_unique",
"owner": "samecolvin" }, "cfc": { "type": "cfc" }, "multicolIdx": { - "type": "multiCol_test" + "type": "multicol", + "params": { + "column_count": "2" + } }, "colc_map": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "multicol_tbl" }, "name_muticoltbl_map": { - "type": "name_lkp_test", - "owner": "multicol_tbl" + "type": "lookup", + "owner": "multicol_tbl", + "params": { + "table": "name_user_vdx", + "from": "name", + "to": "keyspace_id" + } }, "non_planable_user_map": { "type": "lookup_unicodeloosemd5_hash", @@ -141,7 +173,7 @@ "owner": "user_metadata" }, "lkp_shard_map": { - "type": "name_lkp_test", + "type": "lookup_unique", "owner": "mixed_tbl", "params": { "table": "lkp_shard_vdx", @@ -153,18 +185,18 @@ "type": "xxhash" }, "unq_lkp_bf_vdx": { - "type": "unq_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "unq_lkp_idx", - "from": " ", + "from": "unq_key", "to": "keyspace_id", "cost": "100", "write_only": "true" } }, "unq_lkp_vdx": { - "type": "unq_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "unq_lkp_idx", @@ -174,11 +206,11 @@ } }, "lkp_bf_vdx": { - "type": "name_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "lkp_shard_vdx", - "from": " ", + "from": "unq_key", "to": "keyspace_id", "write_only": "true" } @@ -352,6 +384,22 @@ } ] }, + "cola_map": { + "column_vindexes": [ + { + "column": "cola", + "name": "hash" + } + ] + }, + "colb_colc_map": { + "column_vindexes": [ + { + "column": "colb", + "name": "hash" + } + ] + }, "overlap_vindex": { "column_vindexes": [ { @@ -462,6 +510,14 @@ } ] }, + "costly_map": { + "column_vindexes": [ + { + "column": "name", + "name": "user_md5_index" + } + ] + }, "mixed_tbl": { "column_vindexes": [ { @@ -641,7 +697,10 @@ "type": "hash_test" }, "multicolIdx": { - "type": "multiCol_test" + "type": "multicol", + "params": { + "column_count": "3" + } } }, "tables": { diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go index a97411a6ac8..eeadb69b532 100644 --- a/go/vt/vtgate/vindexes/cached_size.go +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -175,6 +175,18 @@ func (cached *Keyspace) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Name))) return size } +func (cached *LookupCost) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field LookupNonUnique *vitess.io/vitess/go/vt/vtgate/vindexes.LookupNonUnique + size += cached.LookupNonUnique.CachedSize(true) + return size +} func (cached *LookupHash) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) diff --git a/go/vt/vtgate/vindexes/lookup_cost.go b/go/vt/vtgate/vindexes/lookup_cost.go new file mode 100644 index 00000000000..6556032cea5 --- /dev/null +++ b/go/vt/vtgate/vindexes/lookup_cost.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+const defaultCost = 5
+
+// LookupCost defines a test vindex that uses the cost provided by the user.
+type LookupCost struct {
+	*LookupNonUnique
+	cost int
+}
+
+// Cost returns the cost of this vindex as provided.
+func (lc *LookupCost) Cost() int {
+	return lc.cost
+}
+
+func newLookupCost(name string, m map[string]string) (Vindex, error) {
+	lookup, err := newLookup(name, m)
+	if err != nil {
+		return nil, err
+	}
+	cost := getInt(m, "cost", defaultCost)
+	return &LookupCost{
+		LookupNonUnique: lookup.(*LookupNonUnique),
+		cost:            cost,
+	}, nil
+}
+
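+// getInt reads an integer parameter from the vindex params map, returning
+// defaultVal when the key is absent or not a valid integer.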
+func getInt(m map[string]string, key string, defaultVal int) int {
+	val, ok := m[key]
+	if !ok {
+		return defaultVal
+	}
+	intVal, err := strconv.Atoi(val)
+	if err != nil {
+		return defaultVal
+	}
+	return intVal
+}
diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go
index 25f8e135698..f9bcf43ddaa 100644
--- a/go/vt/vtgate/vindexes/vschema_test.go
+++ b/go/vt/vtgate/vindexes/vschema_test.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"os"
 	"reflect"
 	"strings"
 	"testing"
@@ -3551,6 +3552,20 @@ func TestFindTableWithSequences(t *testing.T) {
 	}
 }
 
+func TestGlobalTables(t *testing.T) {
+	input, err := os.ReadFile("../planbuilder/testdata/vschemas/schema.json")
+	require.NoError(t, err)
+
+	var vs vschemapb.SrvVSchema
+	err = json2.UnmarshalPB(input, &vs)
+	require.NoError(t, err)
+
+	got := BuildVSchema(&vs, sqlparser.NewTestParser())
+	tbl, err := got.findGlobalTable("user", false)
+	require.NoError(t, err)
+	assert.NotNil(t, tbl)
+}
+
 func vindexNames(vindexes []*ColumnVindex) (result []string) {
 	for _, vindex := range vindexes {
 		result = append(result, vindex.Name)
diff --git a/go/vt/vtgate/vschemaacl/vschemaacl.go b/go/vt/vtgate/vschemaacl/vschemaacl.go
index 5345d1437fc..08f6c2b0cd4 100644
--- a/go/vt/vtgate/vschemaacl/vschemaacl.go
+++ b/go/vt/vtgate/vschemaacl/vschemaacl.go
@@ -18,26 +18,68 @@ package vschemaacl
 
 import (
 	"strings"
-	"sync"
 
 	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
 
-	"vitess.io/vitess/go/vt/servenv"
-
+	"vitess.io/vitess/go/viperutil"
 	querypb "vitess.io/vitess/go/vt/proto/query"
+	"vitess.io/vitess/go/vt/servenv"
 )
 
-var (
-	// AuthorizedDDLUsers specifies the users that can perform ddl operations
-	AuthorizedDDLUsers string
-
-	// ddlAllowAll is true if the special value of "*" was specified
+type authorizedDDLUsers struct {
 	allowAll bool
+	acl      map[string]struct{}
+	source   string
+}
+
+// NewAuthorizedDDLUsers parses a comma-separated user list into an ACL;
+// the special value "%" authorizes all users.
+func NewAuthorizedDDLUsers(users string) *authorizedDDLUsers {
+	acl := make(map[string]struct{})
+	allowAll := false
+
+	switch users {
+	case "":
+	case "%":
+		allowAll = true
+	default:
+		for _, user := range strings.Split(users, ",") {
+			user = strings.TrimSpace(user)
+			acl[user] = struct{}{}
+		}
+	}
+
+	return &authorizedDDLUsers{
+		allowAll: allowAll,
+		acl:      acl,
+		source:   users,
+	}
+}
 
-	// ddlACL contains a set of allowed usernames
-	acl map[string]struct{}
+func (a *authorizedDDLUsers) String() string {
+	return a.source
+}
 
-	initMu sync.Mutex
+var (
+	// AuthorizedDDLUsers specifies the users that can perform ddl operations
+	AuthorizedDDLUsers = viperutil.Configure(
+		"vschema_ddl_authorized_users",
+		viperutil.Options[*authorizedDDLUsers]{
+			FlagName: "vschema_ddl_authorized_users",
+			Default:  &authorizedDDLUsers{},
+			Dynamic:  true,
+			GetFunc: func(v *viper.Viper) func(key string) *authorizedDDLUsers {
+				return func(key string) *authorizedDDLUsers {
+					newVal := v.GetString(key)
+					curVal, ok := v.Get(key).(*authorizedDDLUsers)
+					if ok && newVal == curVal.source {
+						return curVal
+					}
+					return NewAuthorizedDDLUsers(newVal)
+				}
+			},
+		},
+	)
 )
 
 // RegisterSchemaACLFlags installs log flags on the given FlagSet.
@@ -46,7 +87,8 @@ var (
 // calls this function, or call this function directly before parsing
 // command-line arguments.
 func RegisterSchemaACLFlags(fs *pflag.FlagSet) {
-	fs.StringVar(&AuthorizedDDLUsers, "vschema_ddl_authorized_users", AuthorizedDDLUsers, "List of users authorized to execute vschema ddl operations, or '%' to allow all users.")
+	fs.String("vschema_ddl_authorized_users", "", "List of users authorized to execute vschema ddl operations, or '%' to allow all users.")
+	viperutil.BindFlags(fs, AuthorizedDDLUsers)
 }
 
 func init() {
@@ -55,33 +97,14 @@
 	}
 }
 
-// Init parses the users option and sets allowAll / acl accordingly
-func Init() {
-	initMu.Lock()
-	defer initMu.Unlock()
-	acl = make(map[string]struct{})
-	allowAll = false
-
-	if AuthorizedDDLUsers == "%" {
-		allowAll = true
-		return
-	} else if AuthorizedDDLUsers == "" {
-		return
-	}
-
-	for _, user := range strings.Split(AuthorizedDDLUsers, ",") {
-		user = strings.TrimSpace(user)
-		acl[user] = struct{}{}
-	}
-}
-
 // Authorized returns true if the given caller is allowed to execute vschema operations
 func Authorized(caller *querypb.VTGateCallerID) bool {
-	if allowAll {
+	users := AuthorizedDDLUsers.Get()
+	if users.allowAll {
 		return true
 	}
 	user := caller.GetUsername()
-	_, ok := acl[user]
+	_, ok := users.acl[user]
 	return ok
 }
diff --git a/go/vt/vtgate/vschemaacl/vschemaacl_test.go b/go/vt/vtgate/vschemaacl/vschemaacl_test.go
index faa2dbfc294..cfd1de705af 100644
--- a/go/vt/vtgate/vschemaacl/vschemaacl_test.go
+++ b/go/vt/vtgate/vschemaacl/vschemaacl_test.go
@@ -35,8 +35,7 @@ func TestVschemaAcl(t *testing.T) {
 	}
 
 	// Test wildcard
-	AuthorizedDDLUsers = "%"
-	Init()
+	AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("%"))
 
 	if !Authorized(&redUser) {
 		t.Errorf("user should be authorized")
@@ -46,8 +45,7 @@
 	}
 
 	// Test user list
-	AuthorizedDDLUsers = "oneUser, twoUser, redUser, blueUser"
-	Init()
+	AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("oneUser, twoUser, redUser, blueUser"))
 
 	if !Authorized(&redUser) {
 		t.Errorf("user should be authorized")
@@ -57,8 +55,7 @@
 	}
 
 	// Revert to baseline state for other tests
-	AuthorizedDDLUsers = ""
-	Init()
+	AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers(""))
 
 	// By default no users are allowed in
 	if Authorized(&redUser) {
diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
index 0a5bd9f26fd..3f8bc85ac7f 100644
--- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
@@ -305,7 +305,6 @@
 // results returned. Followed by ensuring that SwitchTraffic
 // and ReverseTraffic also work as expected.
 func TestMoveTablesUnsharded(t *testing.T) {
-	t.Skip("Skipping test temporarily as it is flaky on CI, pending investigation")
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	sourceKs := "sourceks"
@@ -403,6 +402,9 @@
 	ftc.vrdbClient.AddInvariant(getCopyStateQuery, &sqltypes.Result{})
 	tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{})
 	ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil)
+	for _, table := range defaultSchema.TableDefinitions {
+		tenv.db.AddQuery(fmt.Sprintf(getNonEmptyTableQuery, table.Name), &sqltypes.Result{})
+	}
 	insert := fmt.Sprintf(`%s values ('%s', 'keyspace:"%s" shard:"%s" filter:{rules:{match:"t1" filter:"select * from t1"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', %d, 0, 0, '{}')`, insertVReplicationPrefix, wf, sourceKs, sourceShard, tenv.cells[0], tenv.dbName, vreplID)
 	ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil)
@@ -1780,7 +1782,7 @@
 		"0",
 	))
 	dbClient.AddInvariant(fmt.Sprintf(updatePickedSourceTablet, cell, sourceTabletUID, vreplID), &sqltypes.Result{})
-
+	dbClient.AddInvariant("update _vt.vreplication set state='Running', message='' where id=1", &sqltypes.Result{})
 }
 
 func addMaterializeSettingsTablesToSchema(ms *vtctldatapb.MaterializeSettings, tenv *testEnv, venv *vtenv.Environment) {
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index cb61c4bab99..b540fc9f8f0 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -28,7 +28,6 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/mysql/capabilities"
 	"vitess.io/vitess/go/mysql/fakesqldb"
 	"vitess.io/vitess/go/mysql/replication"
 	"vitess.io/vitess/go/sqltypes"
@@ -36,6 +35,7 @@
 	"vitess.io/vitess/go/vt/logutil"
 	"vitess.io/vitess/go/vt/mysqlctl"
 	"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+	"vitess.io/vitess/go/vt/mysqlctl/blackbox"
 	"vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
 	"vitess.io/vitess/go/vt/topo/memorytopo"
 	"vitess.io/vitess/go/vt/topo/topoproto"
@@ -132,7 +132,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
 		require.NoError(t, os.MkdirAll(s, os.ModePerm))
 	}
 
-	needIt, err := needInnoDBRedoLogSubdir()
+	needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
 	require.NoError(t, err)
 	if needIt {
 		newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -371,7 +371,7 @@ func TestBackupRestoreLagged(t *testing.T) {
 	}
 	require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
 
-	needIt, err := needInnoDBRedoLogSubdir()
+	needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
 	require.NoError(t, err)
 	if needIt {
 		newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -591,7 +591,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
 	}
 	require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
 
-	needIt, err := needInnoDBRedoLogSubdir()
+	needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
 	require.NoError(t, err)
 	if needIt {
 		newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -767,7 +767,7 @@ func TestDisableActiveReparents(t *testing.T) {
 	}
 	require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
 
-	needIt, err := needInnoDBRedoLogSubdir()
+	needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
 	require.NoError(t, err)
 	if needIt {
 		newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -877,25 +877,3 @@
 	assert.False(t, destTablet.FakeMysqlDaemon.Replicating)
 	assert.True(t, destTablet.FakeMysqlDaemon.Running)
 }
-
-// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory.
-// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the
-// <innodb_log_group_home_dir> (<datadir>/. by default) called "#innodb_redo". See:
-//
-//	https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity
-func needInnoDBRedoLogSubdir() (needIt bool, err error) {
-	mysqldVersionStr, err := mysqlctl.GetVersionString()
-	if err != nil {
-		return needIt, err
-	}
-	_, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr)
-	if err != nil {
-		return needIt, err
-	}
-	versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch)
-	capableOf := mysql.ServerVersionCapableOf(versionStr)
-	if capableOf == nil {
-		return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr)
-	}
-	return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability)
-}
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index 52da65d8041..bf42825d73c 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -103,6 +103,7 @@
 		"vtgate_vindex_heavy",
 		"vtgate_vschema",
 		"vtgate_queries",
+		"vtgate_plantests",
 		"vtgate_schema_tracker",
 		"vtgate_foreignkey_stress",
 		"vtorc",
@@ -157,6 +158,9 @@
 		"vreplication_migrate",
 		"vreplication_vtctldclient_vdiff2_movetables_tz",
 	}
+	clusterRequiringMinio = []string{
+		"21",
+	}
 )
 
 type unitTest struct {
@@ -175,6 +179,7 @@
 	EnablePartialJSON bool
 	PartialKeyspace   bool
 	Cores16           bool
+	NeedsMinio        bool
 }
 
 type vitessTesterTest struct {
@@ -287,6 +292,13 @@
 			break
 		}
 	}
+	minioClusters := canonnizeList(clusterRequiringMinio)
+	for _, minioCluster := range minioClusters {
+		if minioCluster == cluster {
+			test.NeedsMinio = true
+			break
+		}
+	}
 	if mysqlVersion == mysql57 {
 		test.Platform = string(mysql57)
 	}
diff --git a/test/config.json b/test/config.json
index bb383c18614..da0026f0125 100644
--- a/test/config.json
+++ b/test/config.json
@@ -136,6 +136,15 @@
     "RetryMax": 1,
     "Tags": []
   },
+  "backup_s3": {
+    "File": "unused.go",
+    "Args": ["vitess.io/vitess/go/test/endtoend/backup/s3", "-timeout", "30m"],
+    "Command": [],
+    "Manual": false,
+    "Shard": "21",
+    "RetryMax": 1,
+    "Tags": []
+  },
   "backup_only": {
     "File": "unused.go",
     "Args": ["vitess.io/vitess/go/test/endtoend/backup/vtbackup", "-timeout", "20m"],
@@ -340,17 +349,6 @@
     "RetryMax": 1,
     "Tags": []
   },
-  "pitr": {
-    "File": "unused.go",
-    "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"],
-    "Command": [],
-    "Manual": false,
-    "Shard": "10",
-    "RetryMax": 1,
-    "Tags": [
-      "site_test"
-    ]
-  },
   "recovery": {
     "File": "unused.go",
     "Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"],
@@ -887,6 +885,15 @@
     "RetryMax": 1,
     "Tags": []
   },
+  "vtgate_plantests": {
+    "File": "unused.go",
+    "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/plan_tests"],
+    "Command": [],
+    "Manual": false,
+    "Shard": "vtgate_plantests",
+    "RetryMax": 1,
+    "Tags": []
+  },
   "vtgate_unsharded": {
{ "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/unsharded"], diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl index 387d2e8509a..f51b06a2faf 100644 --- a/test/templates/cluster_endtoend_test.tpl +++ b/test/templates/cluster_endtoend_test.tpl @@ -157,6 +157,15 @@ jobs: {{end}} + {{if .NeedsMinio }} + - name: Install Minio + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + wget https://dl.min.io/server/minio/release/linux-amd64/minio + chmod +x minio + mv minio /usr/local/bin + {{end}} + {{if .MakeTools}} - name: Installing zookeeper and consul