diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml deleted file mode 100644 index a1296db1b09..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ /dev/null @@ -1,151 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_ghost) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_ghost) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl -s\ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v4 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: dorny/paths-filter@v3.0.1 - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'go/vt/sidecardb/**/*.sql' - - 'go/test/endtoend/onlineddl/vrepl_suite/**' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_ghost.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v5 - with: - go-version: 1.22.2 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v5 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get -qq update - # Install everything else we need, and configure - sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml deleted file mode 100644 index aee79e67c45..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml +++ /dev/null @@ -1,162 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_ghost) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v4 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: dorny/paths-filter@v3.0.1 - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'go/vt/sidecardb/**/*.sql' - - 'go/test/endtoend/onlineddl/vrepl_suite/**' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v5 - with: - go-version: 1.22.2 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v5 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. - echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true - fi - - # print test output - cat output.txt diff --git a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go deleted file mode 100644 index 41a9a80086b..00000000000 --- a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go +++ /dev/null @@ -1,451 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ghost - -import ( - "flag" - "fmt" - "os" - "path" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/schema" - - "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/test/endtoend/onlineddl" - "vitess.io/vitess/go/test/endtoend/throttler" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - shards []cluster.Shard - vtParams mysql.ConnParams - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - schemaChangeDirectory = "" - totalTableCount = 4 - - normalMigrationWait = 20 * time.Second - - createTable = ` - CREATE TABLE %s ( - id bigint(20) NOT NULL, - msg varchar(64), - PRIMARY KEY (id) - ) ENGINE=InnoDB;` - insertStatements = []string{ - `insert into %s (id, msg) values (3, 'three')`, - `insert into %s (id, msg) values (5, 'five')`, - `insert into %s (id, msg) values (7, 'seven')`, - `insert into %s (id, msg) values (11, 'eleven')`, - `insert into %s (id, msg) values (13, 'thirteen')`, - } - // To verify non online-DDL behavior - alterTableNormalStatement = ` - ALTER TABLE %s - ADD COLUMN non_online int UNSIGNED NOT NULL` - // A trivial statement which must succeed and does not change the schema - alterTableTrivialStatement = ` - ALTER TABLE %s - ENGINE=InnoDB` - // The following statement is valid - alterTableSuccessfulStatement = ` - ALTER TABLE %s - MODIFY id bigint UNSIGNED NOT NULL, - ADD COLUMN ghost_col int NOT NULL, - ADD INDEX idx_msg(msg)` - // The following statement will fail because gh-ost requires some shared unique key - alterTableFailedStatement = ` - ALTER TABLE %s - DROP PRIMARY KEY, - DROP COLUMN ghost_col` - // We will run this query with "gh-ost --max-load=Threads_running=1" - alterTableThrottlingStatement = ` - ALTER TABLE %s - DROP COLUMN ghost_col` - onlineDDLCreateTableStatement = ` - CREATE TABLE %s ( - id bigint NOT NULL, - online_ddl_create_col INT NOT NULL, - PRIMARY KEY (id) - ) ENGINE=InnoDB;` - noPKCreateTableStatement = ` - CREATE TABLE %s ( - online_ddl_create_col INT NOT NULL - ) ENGINE=InnoDB;` - onlineDDLDropTableStatement = ` - DROP TABLE %s` - onlineDDLDropTableIfExistsStatement = ` - DROP TABLE IF EXISTS %s` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash_index": { - "type": "hash" - } - }, - "tables": { - "vt_onlineddl_test_00": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_01": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_02": { - "column_vindexes": [ 
- { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_03": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - } - } - } - ` -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitcode, err := func() (int, error) { - clusterInstance = cluster.NewCluster(cell, hostname) - schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) - defer os.RemoveAll(schemaChangeDirectory) - defer clusterInstance.Teardown() - - if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { - _ = os.Mkdir(schemaChangeDirectory, 0700) - } - - clusterInstance.VtctldExtraArgs = []string{ - "--schema_change_dir", schemaChangeDirectory, - "--schema_change_controller", "local", - "--schema_change_check_interval", "1s", - } - - clusterInstance.VtTabletExtraArgs = []string{ - "--heartbeat_interval", "250ms", - "--heartbeat_on_demand_duration", "5s", - "--migration_check_interval", "5s", - "--gh-ost-path", os.Getenv("VITESS_ENDTOEND_GH_OST_PATH"), // leave env variable empty/unset to get the default behavior. Override in Mac. - } - clusterInstance.VtGateExtraArgs = []string{ - "--ddl_strategy", "gh-ost", - } - - if err := clusterInstance.StartTopo(); err != nil { - return 1, err - } - - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - VSchema: vSchema, - } - - if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { - return 1, err - } - - vtgateInstance := clusterInstance.NewVtgateInstance() - // Start vtgate - if err := vtgateInstance.Setup(); err != nil { - return 1, err - } - // ensure it is torn down during cluster TearDown - clusterInstance.VtgateProcess = *vtgateInstance - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - - return m.Run(), nil - }() - if err != nil { - fmt.Printf("%v\n", err) - os.Exit(1) - } else { - os.Exit(exitcode) - } - -} - -func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) - shards = clusterInstance.Keyspaces[0].Shards - assert.Equal(t, 2, len(shards)) - - throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) - - testWithInitialSchema(t) - t.Run("create non_online", func(t *testing.T) { - _ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), "vtctl", "non_online", "") - }) - t.Run("successful online alter, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableSuccessfulStatement, "gh-ost", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - - var totalRowsCopied uint64 - // count sum of rows copied in all shards, that should be the total number of rows inserted to the table - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - rowsCopied := row.AsUint64("rows_copied", 0) - totalRowsCopied += rowsCopied - } - require.Equal(t, uint64(len(insertStatements)), totalRowsCopied) - - // See that we're able to read logs after successful migration: - expectedMessage := "starting gh-ost" - logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) - assert.Equal(t, len(shards), len(logs)) - for i := range logs { - require.Contains(t, logs[i], expectedMessage) - } - - }) - 
t.Run("successful online alter, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("successful online alter, postponed, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -postpone-completion", "vtgate", "ghost_col", "") - // Should be still running! - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) - // Issue a complete and wait for successful completion - onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true) - // This part may take a while, because we depend on vreplicatoin polling - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("throttled migration", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) - }) - t.Run("failed migration", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableFailedStatement, "gh-ost", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) - // migration will fail again - }) - t.Run("cancel all migrations: nothing to cancel", func(t *testing.T) { - // no migrations pending at this time - time.Sleep(10 * time.Second) - onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) - }) - t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { - // spawn n migrations; cancel them via cancel-all - var wg sync.WaitGroup - count := 4 - for i := 0; i < count; i++ { - wg.Add(1) - go func() { - defer wg.Done() - _ = testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col", "") - }() - } - wg.Wait() - onlineddl.CheckCancelAllMigrations(t, &vtParams, len(shards)*count) - }) - t.Run("Online DROP, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtctl", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Online CREATE, vtctl", func(t *testing.T) 
{ - uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "gh-ost", "vtctl", "online_ddl_create_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Online DROP TABLE IF EXISTS, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - // this table existed - checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) - }) - t.Run("Online DROP TABLE IF EXISTS for nonexistent table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - // this table did not exist - checkTables(t, schema.OnlineDDLToGCUUID(uuid), 0) - }) - t.Run("Online DROP TABLE for nonexistent table, expect error, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) - }) - t.Run("Online CREATE no PK table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, noPKCreateTableStatement, "gh-ost", "vtgate", "online_ddl_create_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Fail ALTER for no PK table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - - expectedMessage := "No PRIMARY nor UNIQUE key found" - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - message := row["message"].ToString() - // the following message is generated by gh-ost. 
We test that it is captured in our 'message' column: - require.Contains(t, message, expectedMessage) - } - - // See that we're able to read logs after failed migration: - logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) - assert.Equal(t, len(shards), len(logs)) - for i := range logs { - require.Contains(t, logs[i], expectedMessage) - } - }) -} - -func testWithInitialSchema(t *testing.T) { - // Create 4 tables and populate them - var sqlQuery = "" //nolint - for i := 0; i < totalTableCount; i++ { - tableName := fmt.Sprintf("vt_onlineddl_test_%02d", i) - sqlQuery = fmt.Sprintf(createTable, tableName) - err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) - require.Nil(t, err) - - for _, insert := range insertStatements { - insertQuery := fmt.Sprintf(insert, tableName) - r := onlineddl.VtgateExecQuery(t, &vtParams, insertQuery, "") - require.NotNil(t, r) - } - } - - // Check if 4 tables are created - checkTables(t, "", totalTableCount) -} - -// testOnlineDDLStatement runs an online DDL, ALTER statement -func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, callerID string) (uuid string) { - tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) - sqlQuery := fmt.Sprintf(alterStatement, tableName) - if executeStrategy == "vtgate" { - row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, sqlQuery, "").Named().Row() - if row != nil { - uuid = row.AsString("uuid", "") - } - } else { - var err error - uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, CallerID: callerID}) - assert.NoError(t, err) - } - uuid = strings.TrimSpace(uuid) - fmt.Println("# Generated UUID (for debug purposes):") - fmt.Printf("<%s>\n", uuid) - - strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) - assert.NoError(t, err) - - if !strategySetting.Strategy.IsDirect() { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - } - - if expectHint != "" { - checkMigratedTable(t, tableName, expectHint) - } - return uuid -} - -// checkTables checks the number of tables in the first two shards. 
-func checkTables(t *testing.T, showTableName string, expectCount int) { - for i := range clusterInstance.Keyspaces[0].Shards { - checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) - } -} - -// checkTablesCount checks the number of tables in the given tablet -func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { - query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) - queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - require.Nil(t, err) - assert.Equal(t, expectCount, len(queryResult.Rows)) -} - -// checkMigratedTables checks the CREATE STATEMENT of a table after migration -func checkMigratedTable(t *testing.T, tableName, expectColumn string) { - for i := range clusterInstance.Keyspaces[0].Shards { - createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) - assert.Contains(t, createStatement, expectColumn) - } -} - -// getCreateTableStatement returns the CREATE TABLE statement for a given table -func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { - queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) - require.Nil(t, err) - - assert.Equal(t, len(queryResult.Rows), 1) - assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement - statement = queryResult.Rows[0][1].ToString() - return statement -} diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 883b1e43831..d5106db6bf6 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -1302,7 +1302,6 @@ func testSingleton(t *testing.T) { key updates_idx(updates) ) ENGINE=InnoDB ` - // We will run this query with "gh-ost --max-load=Threads_running=1" alterTableThrottlingStatement = ` ALTER TABLE stress_test DROP COLUMN created_timestamp ` @@ -1358,38 +1357,38 @@ DROP TABLE IF EXISTS stress_test checkTable(t, tableName, true) }) - var throttledUUID string - t.Run("throttled migration", func(t *testing.T) { - throttledUUID = testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "", false)) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) + var openEndedUUID string + t.Run("open ended migration", func(t *testing.T) { + openEndedUUID = testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtgate", "", "hint_col", "", false)) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusRunning) }) t.Run("failed singleton migration, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "rejected", true)) + uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtgate", "", "hint_col", "rejected", true)) assert.Empty(t, uuid) }) t.Run("failed singleton migration, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton 
--max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", true)) + uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtctl", "", "hint_col", "rejected", true)) assert.Empty(t, uuid) }) t.Run("failed revert migration", func(t *testing.T) { - uuid := testRevertMigration(t, createRevertParams(throttledUUID, onlineSingletonDDLStrategy, "vtgate", "", "rejected", true)) + uuid := testRevertMigration(t, createRevertParams(openEndedUUID, onlineSingletonDDLStrategy, "vtgate", "", "rejected", true)) assert.Empty(t, uuid) }) t.Run("terminate throttled migration", func(t *testing.T) { - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) - onlineddl.CheckCancelMigration(t, &vtParams, shards, throttledUUID, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, throttledUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusRunning) + onlineddl.CheckCancelMigration(t, &vtParams, shards, openEndedUUID, true) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, openEndedUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusCancelled) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusCancelled) }) - t.Run("successful gh-ost alter, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtctl", "", "hint_col", "", false)) + t.Run("successful alter, vtctl", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "vitess --singleton", "vtctl", "", "hint_col", "", false)) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) - t.Run("successful gh-ost alter, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtgate", "", "hint_col", "", false)) + t.Run("successful alter, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "vitess --singleton", "vtgate", "", "hint_col", "", false)) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) @@ -1413,8 +1412,8 @@ DROP TABLE IF EXISTS stress_test var throttledUUIDs []string // singleton-context - t.Run("throttled migrations, singleton-context", func(t *testing.T) { - uuidList := testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "", false)) + t.Run("postponed migrations, singleton-context", func(t *testing.T) { + uuidList := testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "vitess --singleton-context --postpone-completion", "vtctl", "", "hint_col", "", false)) throttledUUIDs = strings.Split(uuidList, "\n") assert.Equal(t, 3, len(throttledUUIDs)) for 
_, uuid := range throttledUUIDs {
@@ -1422,7 +1421,7 @@ DROP TABLE IF EXISTS stress_test
 		}
 	})
 	t.Run("failed migrations, singleton-context", func(t *testing.T) {
-		_ = testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", false))
+		_ = testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "vitess --singleton-context --postpone-completion", "vtctl", "", "hint_col", "rejected", false))
 	})
 	t.Run("terminate throttled migrations", func(t *testing.T) {
 		for _, uuid := range throttledUUIDs {
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index fd890df6c32..7e2bb338ed2 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -81,7 +81,6 @@ var (
 		"22",
 		"mysql_server_vault",
 		"vstream",
-		"onlineddl_ghost",
 		"onlineddl_vrepl",
 		"onlineddl_vrepl_stress",
 		"onlineddl_vrepl_stress_suite",
diff --git a/test/config.json b/test/config.json
index 6db35dd8158..9cce5851225 100644
--- a/test/config.json
+++ b/test/config.json
@@ -259,15 +259,6 @@
             "site_test"
         ]
     },
-    "onlineddl_ghost": {
-        "File": "unused.go",
-        "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/ghost", "-timeout", "30m"],
-        "Command": [],
-        "Manual": false,
-        "Shard": "onlineddl_ghost",
-        "RetryMax": 2,
-        "Tags": []
-    },
     "onlineddl_vrepl": {
        "File": "unused.go",
        "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl", "-timeout", "30m"],
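
Note on the strategy swap in go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go above: the singleton tests used to hold a migration in the "running" state by throttling gh-ost with "--max-load=Threads_running=1"; with gh-ost support removed they now use the "vitess" strategy with "--postpone-completion", which keeps the migration open-ended until it is explicitly completed or cancelled. Below is a minimal sketch of that flow, assembled only from helpers already visible in this diff; the createParams argument order is copied from the calls shown above and is not verified against other branches.

```go
// Sketch: start an open-ended migration with the vitess strategy, confirm it is
// running, then cancel it and wait for a terminal state -- mirroring the
// "open ended migration" / "terminate throttled migration" subtests above.
uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement,
	"vitess --singleton --postpone-completion", // replaces "gh-ost --singleton --max-load=Threads_running=1"
	"vtgate", "", "hint_col", "", false))
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)

// Cancellation is expected to succeed while the migration is still open-ended.
onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true)
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second,
	schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled)
```

Because "onlineddl_ghost" is also dropped from the shard list in test/ci_workflow_gen.go and from test/config.json, re-running "make generate_ci_workflows" (per the header of the deleted YAML files) regenerates the workflow set without the two deleted cluster_endtoend_onlineddl_ghost workflows.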