From 36a52e9a9661824ca43b9646b7846372e9cdbe34 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:06:07 +0530 Subject: [PATCH 01/33] Foreign Key Fuzzer Benchmark (#14542) Signed-off-by: Manan Gupta --- go/test/endtoend/utils/utils.go | 2 +- .../vtgate/foreignkey/fk_fuzz_test.go | 41 ++++++--- .../endtoend/vtgate/foreignkey/main_test.go | 83 ++++++++++++++----- .../unsharded_unmanaged_vschema.json | 41 +++++++++ .../endtoend/vtgate/foreignkey/utils_test.go | 75 ++++++++++++++++- 5 files changed, 206 insertions(+), 36 deletions(-) create mode 100644 go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index 9cf9c767fc7..b6bf207decb 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -299,7 +299,7 @@ func WaitForTableDeletions(ctx context.Context, t *testing.T, vtgateProcess clus } // WaitForColumn waits for a table's column to be present -func WaitForColumn(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error { +func WaitForColumn(t testing.TB, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error { timeout := time.After(60 * time.Second) for { select { diff --git a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go index 325bdedc5c9..096efead683 100644 --- a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go +++ b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go @@ -53,6 +53,7 @@ type fuzzer struct { updateShare int concurrency int queryFormat QueryFormat + noFkSetVar bool fkState *bool // shouldStop is an internal state variable, that tells the fuzzer @@ -83,6 +84,7 @@ func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare i updateShare: updateShare, queryFormat: queryFormat, fkState: fkState, + noFkSetVar: false, wg: sync.WaitGroup{}, } // Initially the fuzzer 
thread is stopped. @@ -475,7 +477,7 @@ func (fz *fuzzer) generateParameterizedDeleteQuery() (query string, params []any // getSetVarFkChecksVal generates an optimizer hint to randomly set the foreign key checks to on or off or leave them unaltered. func (fz *fuzzer) getSetVarFkChecksVal() string { - if fz.concurrency != 1 { + if fz.concurrency != 1 || fz.noFkSetVar { return "" } val := rand.Intn(3) @@ -703,14 +705,33 @@ func TestFkFuzzTest(t *testing.T) { } } -func validateReplication(t *testing.T) { - for _, keyspace := range clusterInstance.Keyspaces { - for _, shard := range keyspace.Shards { - for _, vttablet := range shard.Vttablets { - if vttablet.Type != "primary" { - checkReplicationHealthy(t, vttablet) - } - } - } +// BenchmarkFkFuzz benchmarks the performance of Vitess unmanaged, Vitess managed and vanilla MySQL performance on a given set of queries generated by the fuzzer. +func BenchmarkFkFuzz(b *testing.B) { + maxValForCol := 10 + maxValForId := 10 + insertShare := 50 + deleteShare := 50 + updateShare := 50 + numQueries := 1000 + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(b) + for i := 0; i < b.N; i++ { + queries, mysqlConn, vtConn, vtUnmanagedConn := setupBenchmark(b, maxValForId, maxValForCol, insertShare, deleteShare, updateShare, numQueries) + + // Now we run the benchmarks! 
+ b.Run("MySQL", func(b *testing.B) { + startBenchmark(b) + runQueries(b, mysqlConn, queries) + }) + + b.Run("Vitess Managed Foreign Keys", func(b *testing.B) { + startBenchmark(b) + runQueries(b, vtConn, queries) + }) + + b.Run("Vitess Unmanaged Foreign Keys", func(b *testing.B) { + startBenchmark(b) + runQueries(b, vtUnmanagedConn, queries) + }) } } diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index 3ce449f893f..483c1d05e80 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -17,6 +17,7 @@ limitations under the License. package foreignkey import ( + "context" _ "embed" "flag" "fmt" @@ -31,13 +32,14 @@ import ( ) var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams - mysqlParams mysql.ConnParams - vtgateGrpcAddress string - shardedKs = "ks" - unshardedKs = "uks" - Cell = "test" + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + vtgateGrpcAddress string + shardedKs = "ks" + unshardedKs = "uks" + unshardedUnmanagedKs = "unmanaged_uks" + Cell = "test" //go:embed schema.sql schemaSQL string @@ -48,6 +50,9 @@ var ( //go:embed unsharded_vschema.json unshardedVSchema string + //go:embed unsharded_unmanaged_vschema.json + unshardedUnmanagedVSchema string + fkTables = []string{"fk_t1", "fk_t2", "fk_t3", "fk_t4", "fk_t5", "fk_t6", "fk_t7", "fk_t10", "fk_t11", "fk_t12", "fk_t13", "fk_t15", "fk_t16", "fk_t17", "fk_t18", "fk_t19", "fk_t20", "fk_multicol_t1", "fk_multicol_t2", "fk_multicol_t3", "fk_multicol_t4", "fk_multicol_t5", "fk_multicol_t6", "fk_multicol_t7", @@ -124,6 +129,16 @@ func TestMain(m *testing.M) { return 1 } + unmanagedKs := &cluster.Keyspace{ + Name: unshardedUnmanagedKs, + SchemaSQL: schemaSQL, + VSchema: unshardedUnmanagedVSchema, + } + err = clusterInstance.StartUnshardedKeyspace(*unmanagedKs, 1, false) + if err != nil { + return 1 + } + err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 @@ -157,22 +172,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { require.NoError(t, err) deleteAll := func() { - _ = utils.Exec(t, mcmp.VtConn, "use `ks/-80`") - tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} - tables = append(tables, fkTables...) - for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } - _ = utils.Exec(t, mcmp.VtConn, "use `ks/80-`") - for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") - tables = []string{"u_t1", "u_t2", "u_t3"} - tables = append(tables, fkTables...) - for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } + clearOutAllData(t, mcmp.VtConn, mcmp.MySQLConn) _ = utils.Exec(t, mcmp.VtConn, "use `ks`") } @@ -184,3 +184,40 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { cluster.PanicHandler(t) } } + +func startBenchmark(b *testing.B) { + ctx := context.Background() + vtConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(b, err) + mysqlConn, err := mysql.Connect(ctx, &mysqlParams) + require.NoError(b, err) + + clearOutAllData(b, vtConn, mysqlConn) +} + +func clearOutAllData(t testing.TB, vtConn *mysql.Conn, mysqlConn *mysql.Conn) { + _ = utils.Exec(t, vtConn, "use `ks/-80`") + tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} + tables = append(tables, fkTables...) 
+ for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } + _ = utils.Exec(t, vtConn, "use `ks/80-`") + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } + _ = utils.Exec(t, vtConn, "use `uks`") + tables = []string{"u_t1", "u_t2", "u_t3"} + tables = append(tables, fkTables...) + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } + _ = utils.Exec(t, vtConn, "use `unmanaged_uks`") + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } +} diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json b/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json new file mode 100644 index 00000000000..2698e23dac5 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json @@ -0,0 +1,41 @@ +{ + "sharded": false, + "foreignKeyMode": "unmanaged", + "tables": { + "u_t1": {}, + "u_t2": {}, + "fk_t1": {}, + "fk_t2": {}, + "fk_t3": {}, + "fk_t4": {}, + "fk_t5": {}, + "fk_t6": {}, + "fk_t7": {}, + "fk_t10": {}, + "fk_t11": {}, + "fk_t12": {}, + "fk_t13": {}, + "fk_t15": {}, + "fk_t16": {}, + "fk_t17": {}, + "fk_t18": {}, + "fk_t19": {}, + "fk_t20": {}, + "fk_multicol_t1": {}, + "fk_multicol_t2": {}, + "fk_multicol_t3": {}, + "fk_multicol_t4": {}, + "fk_multicol_t5": {}, + 
"fk_multicol_t6": {}, + "fk_multicol_t7": {}, + "fk_multicol_t10": {}, + "fk_multicol_t11": {}, + "fk_multicol_t12": {}, + "fk_multicol_t13": {}, + "fk_multicol_t15": {}, + "fk_multicol_t16": {}, + "fk_multicol_t17": {}, + "fk_multicol_t18": {}, + "fk_multicol_t19": {} + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/utils_test.go b/go/test/endtoend/vtgate/foreignkey/utils_test.go index b97e57d685c..9d4c53145b0 100644 --- a/go/test/endtoend/vtgate/foreignkey/utils_test.go +++ b/go/test/endtoend/vtgate/foreignkey/utils_test.go @@ -17,9 +17,11 @@ limitations under the License. package foreignkey import ( + "context" "database/sql" "fmt" "math/rand" + "slices" "strings" "testing" "time" @@ -75,7 +77,7 @@ func convertIntValueToString(value int) string { // waitForSchemaTrackingForFkTables waits for schema tracking to have run and seen the tables used // for foreign key tests. -func waitForSchemaTrackingForFkTables(t *testing.T) { +func waitForSchemaTrackingForFkTables(t testing.TB) { err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t1", "col") require.NoError(t, err) err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t18", "col") @@ -88,6 +90,8 @@ func waitForSchemaTrackingForFkTables(t *testing.T) { require.NoError(t, err) err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t11", "col") require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedUnmanagedKs, "fk_t11", "col") + require.NoError(t, err) } // getReplicaTablets gets all the replica tablets. @@ -238,7 +242,7 @@ func verifyDataIsCorrect(t *testing.T, mcmp utils.MySQLCompare, concurrency int) } // verifyDataMatches verifies that the two list of results are the same. 
-func verifyDataMatches(t *testing.T, resOne []*sqltypes.Result, resTwo []*sqltypes.Result) { +func verifyDataMatches(t testing.TB, resOne []*sqltypes.Result, resTwo []*sqltypes.Result) { require.EqualValues(t, len(resTwo), len(resOne), "Res 1 - %v, Res 2 - %v", resOne, resTwo) for idx, resultOne := range resOne { resultTwo := resTwo[idx] @@ -256,3 +260,70 @@ func collectFkTablesState(conn *mysql.Conn) []*sqltypes.Result { } return tablesData } + +func validateReplication(t *testing.T) { + for _, keyspace := range clusterInstance.Keyspaces { + for _, shard := range keyspace.Shards { + for _, vttablet := range shard.Vttablets { + if vttablet.Type != "primary" { + checkReplicationHealthy(t, vttablet) + } + } + } + } +} + +// compareResultRows compares the rows of the two results provided. +func compareResultRows(resOne *sqltypes.Result, resTwo *sqltypes.Result) bool { + return slices.EqualFunc(resOne.Rows, resTwo.Rows, func(a, b sqltypes.Row) bool { + return sqltypes.RowEqual(a, b) + }) +} + +// setupBenchmark sets up the benchmark by creating the set of queries that we want to run. It also ensures that the 3 modes (MySQL, Vitess Managed, Vitess Unmanaged) we verify all return the same results after the queries have been executed. +func setupBenchmark(b *testing.B, maxValForId int, maxValForCol int, insertShare int, deleteShare int, updateShare int, numQueries int) ([]string, *mysql.Conn, *mysql.Conn, *mysql.Conn) { + // Clear out all the data to ensure we start with a clean slate. + startBenchmark(b) + // Create a fuzzer to generate and store a certain set of queries. 
+ fz := newFuzzer(1, maxValForId, maxValForCol, insertShare, deleteShare, updateShare, SQLQueries, nil) + fz.noFkSetVar = true + var queries []string + for j := 0; j < numQueries; j++ { + genQueries := fz.generateQuery() + require.Len(b, genQueries, 1) + queries = append(queries, genQueries[0]) + } + + // Connect to MySQL and run all the queries + mysqlConn, err := mysql.Connect(context.Background(), &mysqlParams) + require.NoError(b, err) + // Connect to Vitess managed foreign keys keyspace + vtConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(b, err) + utils.Exec(b, vtConn, fmt.Sprintf("use `%v`", unshardedKs)) + // Connect to Vitess unmanaged foreign keys keyspace + vtUnmanagedConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(b, err) + utils.Exec(b, vtUnmanagedConn, fmt.Sprintf("use `%v`", unshardedUnmanagedKs)) + + // First we make sure that running all the queries in both the Vitess modes and MySQL gives the same data. + // So we run all the queries and then check that the data in all of them matches. 
+ runQueries(b, mysqlConn, queries) + runQueries(b, vtConn, queries) + runQueries(b, vtUnmanagedConn, queries) + for _, table := range fkTables { + query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) + resVitessManaged, _ := vtConn.ExecuteFetch(query, 10000, true) + resMySQL, _ := mysqlConn.ExecuteFetch(query, 10000, true) + resVitessUnmanaged, _ := vtUnmanagedConn.ExecuteFetch(query, 10000, true) + require.True(b, compareResultRows(resVitessManaged, resMySQL), "Results for %v don't match\nVitess Managed\n%v\nMySQL\n%v", table, resVitessManaged, resMySQL) + require.True(b, compareResultRows(resVitessUnmanaged, resMySQL), "Results for %v don't match\nVitess Unmanaged\n%v\nMySQL\n%v", table, resVitessUnmanaged, resMySQL) + } + return queries, mysqlConn, vtConn, vtUnmanagedConn +} + +func runQueries(t testing.TB, conn *mysql.Conn, queries []string) { + for _, query := range queries { + _, _ = utils.ExecAllowError(t, conn, query) + } +} From 1843826feb4e8880f186a2d8c60c92bcc3ba20fa Mon Sep 17 00:00:00 2001 From: Rohit Nayak <57520317+rohit-nayak-ps@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:23:27 +0100 Subject: [PATCH 02/33] VReplication: extended e2e test for workflows with tables containing foreign key constraints (#14327) Signed-off-by: Rohit Nayak Signed-off-by: Matt Lord Co-authored-by: Matt Lord --- ...dtoend_vreplication_foreign_key_stress.yml | 170 ++++++ go/test/endtoend/cluster/vttablet_process.go | 24 + go/test/endtoend/vreplication/cluster_test.go | 5 +- .../fk_ext_load_generator_test.go | 503 ++++++++++++++++++ go/test/endtoend/vreplication/fk_ext_test.go | 450 ++++++++++++++++ go/test/endtoend/vreplication/fk_test.go | 11 +- go/test/endtoend/vreplication/helper_test.go | 89 +++- .../vreplication/movetables_buffering_test.go | 2 +- .../partial_movetables_seq_test.go | 12 +- .../vreplication/partial_movetables_test.go | 23 +- .../resharding_workflows_v2_test.go | 36 +- .../schema/fkext/materialize_schema.sql | 2 + 
.../schema/fkext/source_schema.sql | 2 + .../schema/fkext/source_vschema.json | 6 + .../schema/fkext/target1_vschema.json | 28 + .../schema/fkext/target2_vschema.json | 43 ++ go/test/endtoend/vreplication/vdiff2_test.go | 2 + .../vreplication/vdiff_helper_test.go | 91 +++- .../vdiff_multiple_movetables_test.go | 12 +- .../endtoend/vreplication/wrappers_test.go | 243 +++++++-- .../tabletmanager/vreplication/vplayer.go | 15 +- go/vt/wrangler/traffic_switcher.go | 4 +- test/ci_workflow_gen.go | 1 + test/config.json | 9 + 24 files changed, 1698 insertions(+), 85 deletions(-) create mode 100644 .github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml create mode 100644 go/test/endtoend/vreplication/fk_ext_load_generator_test.go create mode 100644 go/test/endtoend/vreplication/fk_ext_test.go create mode 100644 go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql create mode 100644 go/test/endtoend/vreplication/schema/fkext/source_schema.sql create mode 100644 go/test/endtoend/vreplication/schema/fkext/source_vschema.json create mode 100644 go/test/endtoend/vreplication/schema/fkext/target1_vschema.json create mode 100644 go/test/endtoend/vreplication/schema/fkext/target2_vschema.json diff --git a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml new file mode 100644 index 00000000000..50b65ecc27d --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml @@ -0,0 +1,170 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_foreign_key_stress) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_foreign_key_stress)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" 
+ +jobs: + build: + name: Run endtoend tests on Cluster (vreplication_foreign_key_stress) + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.3 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + # Install everything else we need, and configure + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. 
+ launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql80.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard vreplication_foreign_key_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi + + # print test output + cat output.txt diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index ed6a97a15c2..f51e1838a5b 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -79,6 +79,7 @@ type VttabletProcess struct { DbFlavor string Charset string ConsolidationsURL string + IsPrimary bool // Extra Args to be set before starting the vttablet process ExtraArgs []string @@ -460,6 +461,29 @@ func (vttablet *VttabletProcess) QueryTablet(query string, keyspace string, useD return executeQuery(conn, query) } +// QueryTabletMultiple lets you execute multiple queries -- without any +// results -- against the tablet. +func (vttablet *VttabletProcess) QueryTabletMultiple(queries []string, keyspace string, useDb bool) error { + if !useDb { + keyspace = "" + } + dbParams := NewConnParams(vttablet.DbPort, vttablet.DbPassword, path.Join(vttablet.Directory, "mysql.sock"), keyspace) + conn, err := vttablet.conn(&dbParams) + if err != nil { + return err + } + defer conn.Close() + + for _, query := range queries { + log.Infof("Executing query %s (on %s)", query, vttablet.Name) + _, err := executeQuery(conn, query) + if err != nil { + return err + } + } + return nil +} + func (vttablet *VttabletProcess) defaultConn(dbname string) (*mysql.Conn, error) { dbParams := mysql.ConnParams{ Uname: "vt_dba", diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 4735e94560f..89cebc7d0b1 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -544,6 +544,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa tablets = append(tablets, primary) dbProcesses = append(dbProcesses, proc) primaryTabletUID = primary.Vttablet.TabletUID + primary.Vttablet.IsPrimary = true } for i := 0; i < numReplicas; i++ { @@ -795,13 +796,13 @@ 
func (vc *VitessCluster) getPrimaryTablet(t *testing.T, ksName, shardName string continue } for _, tablet := range shard.Tablets { - if tablet.Vttablet.GetTabletStatus() == "SERVING" { + if tablet.Vttablet.IsPrimary { return tablet.Vttablet } } } } - require.FailNow(t, "no primary found for %s:%s", ksName, shardName) + require.FailNow(t, "no primary found", "keyspace %s, shard %s", ksName, shardName) return nil } diff --git a/go/test/endtoend/vreplication/fk_ext_load_generator_test.go b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go new file mode 100644 index 00000000000..12b5871781f --- /dev/null +++ b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go @@ -0,0 +1,503 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" +) + +const ( + // Only used when debugging tests. 
+ queryLog = "queries.txt" + + LoadGeneratorStateLoading = "loading" + LoadGeneratorStateRunning = "running" + LoadGeneratorStateStopped = "stopped" + + dataLoadTimeout = 1 * time.Minute + tickInterval = 1 * time.Second + queryTimeout = 1 * time.Minute + + getRandomIdQuery = "SELECT id FROM %s.parent ORDER BY RAND() LIMIT 1" + insertQuery = "INSERT INTO %s.parent (id, name) VALUES (%d, 'name-%d')" + updateQuery = "UPDATE %s.parent SET name = 'rename-%d' WHERE id = %d" + deleteQuery = "DELETE FROM %s.parent WHERE id = %d" + insertChildQuery = "INSERT INTO %s.child (id, parent_id) VALUES (%d, %d)" + insertChildQueryOverrideConstraints = "INSERT /*+ SET_VAR(foreign_key_checks=0) */ INTO %s.child (id, parent_id) VALUES (%d, %d)" +) + +// ILoadGenerator is an interface for load generators that we will use to simulate different types of loads. +type ILoadGenerator interface { + Init(ctx context.Context, vc *VitessCluster) // name & description only for logging. + Teardown() + + // "direct", use direct db connection to primary, only for unsharded keyspace. + // or "vtgate" to use vtgate routing. + // Stop() before calling SetDBStrategy(). + SetDBStrategy(direct, keyspace string) + SetOverrideConstraints(allow bool) // true if load generator can insert rows without FK constraints. + + Keyspace() string + DBStrategy() string // direct or vtgate + State() string // state of load generator (stopped, running) + OverrideConstraints() bool // true if load generator can insert rows without FK constraints. + + Load() error // initial load of data. + Start() error // start populating additional data. + Stop() error // stop populating additional data. + + // Implementation will decide which table to wait for extra rows on. + WaitForAdditionalRows(count int) error + // table == "", implementation will decide which table to get rows from, same table as in WaitForAdditionalRows(). 
+ GetRowCount(table string) (int, error) +} + +var lg ILoadGenerator + +var _ ILoadGenerator = (*SimpleLoadGenerator)(nil) + +type LoadGenerator struct { + ctx context.Context + vc *VitessCluster + state string + dbStrategy string + overrideConstraints bool + keyspace string + tables []string +} + +// SimpleLoadGenerator, which has a single parent table and a single child table for which different types +// of DMLs are run. +type SimpleLoadGenerator struct { + LoadGenerator + currentParentId int + currentChildId int + ch chan bool + runCtx context.Context + runCtxCancel context.CancelFunc +} + +func (lg *SimpleLoadGenerator) SetOverrideConstraints(allow bool) { + lg.overrideConstraints = allow +} + +func (lg *SimpleLoadGenerator) OverrideConstraints() bool { + return lg.overrideConstraints +} + +func (lg *SimpleLoadGenerator) GetRowCount(table string) (int, error) { + vtgateConn, err := lg.getVtgateConn(context.Background()) + if err != nil { + return 0, err + } + defer vtgateConn.Close() + return lg.getNumRows(vtgateConn, table), nil +} + +func (lg *SimpleLoadGenerator) getVtgateConn(ctx context.Context) (*mysql.Conn, error) { + vtParams := mysql.ConnParams{ + Host: lg.vc.ClusterConfig.hostname, + Port: lg.vc.ClusterConfig.vtgateMySQLPort, + Uname: "vt_dba", + } + conn, err := mysql.Connect(ctx, &vtParams) + return conn, err +} + +func (lg *SimpleLoadGenerator) getNumRows(vtgateConn *mysql.Conn, table string) int { + t := lg.vc.t + return getRowCount(t, vtgateConn, table) +} + +func (lg *SimpleLoadGenerator) WaitForAdditionalRows(count int) error { + t := lg.vc.t + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + numRowsStart := lg.getNumRows(vtgateConn, "parent") + shortCtx, cancel := context.WithTimeout(context.Background(), dataLoadTimeout) + defer cancel() + for { + select { + case <-shortCtx.Done(): + t.Fatalf("Timed out waiting for additional rows in %q table", "parent") + default: + 
numRows := lg.getNumRows(vtgateConn, "parent") + if numRows >= numRowsStart+count { + return nil + } + time.Sleep(tickInterval) + } + } +} + +func (lg *SimpleLoadGenerator) exec(query string) (*sqltypes.Result, error) { + switch lg.dbStrategy { + case "direct": + // direct is expected to be used only for unsharded keyspaces to simulate an unmanaged keyspace + // that proxies to an external database. + primary := lg.vc.getPrimaryTablet(lg.vc.t, lg.keyspace, "0") + qr, err := primary.QueryTablet(query, lg.keyspace, true) + require.NoError(lg.vc.t, err) + return qr, err + case "vtgate": + return lg.execQueryWithRetry(query) + default: + err := fmt.Errorf("invalid dbStrategy: %v", lg.dbStrategy) + return nil, err + } +} + +// When a workflow switches traffic it is possible to get transient errors from vtgate while executing queries +// due to cluster-level changes. isQueryRetryable() checks for such errors so that tests can wait for such changes +// to complete before proceeding. +func isQueryRetryable(err error) bool { + retryableErrorStrings := []string{ + "retry", + "resharded", + "VT13001", + "Lock wait timeout exceeded", + "errno 2003", + } + for _, e := range retryableErrorStrings { + if strings.Contains(err.Error(), e) { + return true + } + } + return false +} + +func (lg *SimpleLoadGenerator) execQueryWithRetry(query string) (*sqltypes.Result, error) { + ctx, cancel := context.WithTimeout(context.Background(), queryTimeout) + defer cancel() + errCh := make(chan error) + qrCh := make(chan *sqltypes.Result) + var vtgateConn *mysql.Conn + go func() { + var qr *sqltypes.Result + var err error + retry := false + for { + select { + case <-ctx.Done(): + errCh <- fmt.Errorf("query %q did not succeed before the timeout of %s", query, queryTimeout) + return + default: + } + if lg.runCtx != nil && lg.runCtx.Err() != nil { + log.Infof("Load generator run context done, query never completed: %q", query) + errCh <- fmt.Errorf("load generator stopped") + return + } + if retry 
{ + time.Sleep(tickInterval) + } + // We need to parse the error as well as the output of vdiff to determine if the error is retryable, since + // sometimes it is observed that we get the error output as part of vdiff output. + vtgateConn, err = lg.getVtgateConn(ctx) + if err != nil { + if !isQueryRetryable(err) { + errCh <- err + return + } + time.Sleep(tickInterval) + continue + } + qr, err = vtgateConn.ExecuteFetch(query, 1000, false) + vtgateConn.Close() + if err == nil { + qrCh <- qr + return + } + if !isQueryRetryable(err) { + errCh <- err + return + } + retry = true + } + }() + select { + case qr := <-qrCh: + return qr, nil + case err := <-errCh: + log.Infof("query %q failed with error %v", query, err) + return nil, err + } +} + +func (lg *SimpleLoadGenerator) Load() error { + lg.state = LoadGeneratorStateLoading + defer func() { lg.state = LoadGeneratorStateStopped }() + log.Infof("Inserting initial FK data") + var queries = []string{ + "insert into parent values(1, 'parent1'), (2, 'parent2');", + "insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');", + } + for _, query := range queries { + _, err := lg.exec(query) + require.NoError(lg.vc.t, err) + } + log.Infof("Done inserting initial FK data") + return nil +} + +func (lg *SimpleLoadGenerator) Start() error { + if lg.state == LoadGeneratorStateRunning { + log.Infof("Load generator already running") + return nil + } + lg.state = LoadGeneratorStateRunning + go func() { + defer func() { + lg.state = LoadGeneratorStateStopped + log.Infof("Load generator stopped") + }() + lg.runCtx, lg.runCtxCancel = context.WithCancel(lg.ctx) + defer func() { + lg.runCtx = nil + lg.runCtxCancel = nil + }() + t := lg.vc.t + var err error + log.Infof("Load generator starting") + for i := 0; ; i++ { + if i%1000 == 0 { + // Log occasionally to show that the test is still running. 
+ log.Infof("Load simulation iteration %d", i) + } + select { + case <-lg.ctx.Done(): + log.Infof("Load generator context done") + lg.ch <- true + return + case <-lg.runCtx.Done(): + log.Infof("Load generator run context done") + lg.ch <- true + return + default: + } + op := rand.Intn(100) + switch { + case op < 50: // 50% chance to insert + lg.insert() + case op < 80: // 30% chance to update + lg.update() + default: // 20% chance to delete + lg.delete() + } + require.NoError(t, err) + time.Sleep(1 * time.Millisecond) + } + }() + return nil +} + +func (lg *SimpleLoadGenerator) Stop() error { + if lg.state == LoadGeneratorStateStopped { + log.Infof("Load generator already stopped") + return nil + } + if lg.runCtx != nil && lg.runCtxCancel != nil { + log.Infof("Canceling load generator") + lg.runCtxCancel() + } + // Wait for ch to be closed or we hit a timeout. + timeout := vdiffTimeout + select { + case <-lg.ch: + log.Infof("Load generator stopped") + lg.state = LoadGeneratorStateStopped + return nil + case <-time.After(timeout): + log.Infof("Timed out waiting for load generator to stop") + return fmt.Errorf("timed out waiting for load generator to stop") + } +} + +func (lg *SimpleLoadGenerator) Init(ctx context.Context, vc *VitessCluster) { + lg.ctx = ctx + lg.vc = vc + lg.state = LoadGeneratorStateStopped + lg.currentParentId = 100 + lg.currentChildId = 100 + lg.ch = make(chan bool) + lg.tables = []string{"parent", "child"} +} + +func (lg *SimpleLoadGenerator) Teardown() { + // noop +} + +func (lg *SimpleLoadGenerator) SetDBStrategy(strategy, keyspace string) { + lg.dbStrategy = strategy + lg.keyspace = keyspace +} + +func (lg *SimpleLoadGenerator) Keyspace() string { + return lg.keyspace +} + +func (lg *SimpleLoadGenerator) DBStrategy() string { + return lg.dbStrategy +} + +func (lg *SimpleLoadGenerator) State() string { + return lg.state +} + +func isQueryCancelled(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), 
"load generator stopped") +} + +func (lg *SimpleLoadGenerator) insert() { + t := lg.vc.t + currentParentId++ + query := fmt.Sprintf(insertQuery, lg.keyspace, currentParentId, currentParentId) + qr, err := lg.exec(query) + if isQueryCancelled(err) { + return + } + require.NoError(t, err) + require.NotNil(t, qr) + // Insert one or more children, some with valid foreign keys, some without. + for i := 0; i < rand.Intn(4)+1; i++ { + currentChildId++ + if i == 3 && lg.overrideConstraints { + query = fmt.Sprintf(insertChildQueryOverrideConstraints, lg.keyspace, currentChildId, currentParentId+1000000) + lg.exec(query) + } else { + query = fmt.Sprintf(insertChildQuery, lg.keyspace, currentChildId, currentParentId) + lg.exec(query) + } + } +} + +func (lg *SimpleLoadGenerator) getRandomId() int64 { + t := lg.vc.t + qr, err := lg.exec(fmt.Sprintf(getRandomIdQuery, lg.keyspace)) + if isQueryCancelled(err) { + return 0 + } + require.NoError(t, err) + require.NotNil(t, qr) + if len(qr.Rows) == 0 { + return 0 + } + id, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return id +} + +func (lg *SimpleLoadGenerator) update() { + id := lg.getRandomId() + if id == 0 { + return + } + updateQuery := fmt.Sprintf(updateQuery, lg.keyspace, id, id) + _, err := lg.exec(updateQuery) + if isQueryCancelled(err) { + return + } + require.NoError(lg.vc.t, err) +} + +func (lg *SimpleLoadGenerator) delete() { + id := lg.getRandomId() + if id == 0 { + return + } + deleteQuery := fmt.Sprintf(deleteQuery, lg.keyspace, id) + _, err := lg.exec(deleteQuery) + if isQueryCancelled(err) { + return + } + require.NoError(lg.vc.t, err) +} + +// FIXME: following three functions are copied over from vtgate test utility functions in +// `go/test/endtoend/utils/utils.go`. +// We will to refactor and then reuse the same functionality from vtgate tests, in the near future. 
+ +func convertToMap(input interface{}) map[string]interface{} { + output := input.(map[string]interface{}) + return output +} + +func getTableT2Map(res *interface{}, ks, tbl string) map[string]interface{} { + step1 := convertToMap(*res)["keyspaces"] + step2 := convertToMap(step1)[ks] + step3 := convertToMap(step2)["tables"] + tblMap := convertToMap(step3)[tbl] + return convertToMap(tblMap) +} + +// waitForColumn waits for a table's column to be present in the vschema because vtgate's foreign key managed mode +// expects the column to be present in the vschema before it can be used in a foreign key constraint. +func waitForColumn(t *testing.T, vtgateProcess *cluster.VtgateProcess, ks, tbl, col string) error { + timeout := time.After(defaultTimeout) + for { + select { + case <-timeout: + return fmt.Errorf("schema tracking did not find column '%s' in table '%s'", col, tbl) + default: + time.Sleep(defaultTick) + res, err := vtgateProcess.ReadVSchema() + require.NoError(t, err, res) + t2Map := getTableT2Map(res, ks, tbl) + authoritative, fieldPresent := t2Map["column_list_authoritative"] + if !fieldPresent { + break + } + authoritativeBool, isBool := authoritative.(bool) + if !isBool || !authoritativeBool { + break + } + colMap, exists := t2Map["columns"] + if !exists { + break + } + colList, isSlice := colMap.([]interface{}) + if !isSlice { + break + } + for _, c := range colList { + colDef, isMap := c.(map[string]interface{}) + if !isMap { + break + } + if colName, exists := colDef["name"]; exists && colName == col { + log.Infof("Found column '%s' in table '%s' for keyspace '%s'", col, tbl, ks) + return nil + } + } + } + } +} diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go new file mode 100644 index 00000000000..665f8052486 --- /dev/null +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -0,0 +1,450 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + _ "embed" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +const ( + shardStatusWaitTimeout = 30 * time.Second +) + +var ( + //go:embed schema/fkext/source_schema.sql + FKExtSourceSchema string + //go:embed schema/fkext/source_vschema.json + FKExtSourceVSchema string + //go:embed schema/fkext/target1_vschema.json + FKExtTarget1VSchema string + //go:embed schema/fkext/target2_vschema.json + FKExtTarget2VSchema string + //go:embed schema/fkext/materialize_schema.sql + FKExtMaterializeSchema string +) + +type fkextConfigType struct { + *ClusterConfig + sourceKeyspaceName string + target1KeyspaceName string + target2KeyspaceName string + cell string +} + +var fkextConfig *fkextConfigType + +func initFKExtConfig(t *testing.T) { + fkextConfig = &fkextConfigType{ + ClusterConfig: mainClusterConfig, + sourceKeyspaceName: "source", + target1KeyspaceName: "target1", + target2KeyspaceName: "target2", + cell: "zone1", + } +} + +/* +TestFKExt is an end-to-end test for validating the foreign key implementation with respect to, both vreplication +flows and vtgate processing of DMLs for tables with foreign key constraints. 
It currently: +* Sets up a source keyspace, to simulate the external database, with a parent and child table with a foreign key constraint. +* Creates a target keyspace with two shards, the Vitess keyspace, into which the source data is imported. +* Imports the data using MoveTables. This uses the atomic copy flow +to test that we can import data with foreign key constraints and that data is kept consistent even after the copy phase +since the tables continue to have the FK Constraints. +* Creates a new keyspace with two shards, the Vitess keyspace, into which the data is migrated using MoveTables. +* Materializes the parent and child tables into a different keyspace. +* Reshards the keyspace from 2 shards to 3 shards. +* Reshards the keyspace from 3 shards to 1 shard. + +We drop constraints from the tables from some replicas to simulate a replica that is not doing cascades in +innodb, to confirm that vtgate's fkmanaged mode is working properly. +*/ + +func TestFKExt(t *testing.T) { + setSidecarDBName("_vt") + + // Ensure that there are multiple copy phase cycles per table. 
+ extraVTTabletArgs = append(extraVTTabletArgs, "--vstream_packet_size=256", "--queryserver-config-schema-change-signal") + extraVTGateArgs = append(extraVTGateArgs, "--schema_change_signal=true", "--planner-version", "Gen4") + defer func() { extraVTTabletArgs = nil }() + initFKExtConfig(t) + + cellName := fkextConfig.cell + cells := []string{cellName} + vc = NewVitessCluster(t, t.Name(), cells, fkextConfig.ClusterConfig) + + require.NotNil(t, vc) + allCellNames = cellName + defaultCellName := cellName + defaultCell = vc.Cells[defaultCellName] + cell := vc.Cells[cellName] + + defer vc.TearDown(t) + + sourceKeyspace := fkextConfig.sourceKeyspaceName + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, "0", FKExtSourceVSchema, FKExtSourceSchema, 0, 0, 100, nil) + + vtgate = cell.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, "0") + require.NoError(t, err) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, "0"), 1, shardStatusWaitTimeout)) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + lg = &SimpleLoadGenerator{} + lg.Init(context.Background(), vc) + lg.SetDBStrategy("vtgate", fkextConfig.sourceKeyspaceName) + if lg.Load() != nil { + t.Fatal("Load failed") + } + if lg.Start() != nil { + t.Fatal("Start failed") + } + t.Run("Import from external db", func(t *testing.T) { + // Import data into vitess from sourceKeyspace to target1Keyspace, both unsharded. + importIntoVitess(t) + }) + + t.Run("MoveTables from unsharded to sharded keyspace", func(t *testing.T) { + // Migrate data from target1Keyspace to the sharded target2Keyspace. Drops constraints from + // replica to simulate a replica that is not doing cascades in innodb to test vtgate's fkmanaged mode. 
+ // The replica with dropped constraints is used as source for the next workflow called in materializeTables(). + moveKeyspace(t) + }) + + t.Run("Materialize parent and copy tables without constraints", func(t *testing.T) { + // Materialize the tables from target2Keyspace to target1Keyspace. Stream only from replicas, one + // shard with constraints dropped. + materializeTables(t) + }) + lg.SetDBStrategy("vtgate", fkextConfig.target2KeyspaceName) + if lg.Start() != nil { + t.Fatal("Start failed") + } + threeShards := "-40,40-c0,c0-" + keyspaceName := fkextConfig.target2KeyspaceName + ks := vc.Cells[fkextConfig.cell].Keyspaces[keyspaceName] + numReplicas := 1 + + t.Run("Reshard keyspace from 2 shards to 3 shards", func(t *testing.T) { + tabletID := 500 + require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, threeShards, numReplicas, 0, tabletID, nil)) + tablets := make(map[string]*cluster.VttabletProcess) + for i, shard := range strings.Split(threeShards, ",") { + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), numReplicas, shardStatusWaitTimeout)) + tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID+i*100)].Vttablet + } + sqls := strings.Split(FKExtSourceSchema, "\n") + for _, sql := range sqls { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", + "--ddl_strategy=direct", "--sql", sql, keyspaceName) + require.NoErrorf(t, err, output) + } + doReshard(t, fkextConfig.target2KeyspaceName, "reshard2to3", "-80,80-", threeShards, tablets) + }) + t.Run("Reshard keyspace from 3 shards to 1 shard", func(t *testing.T) { + tabletID := 800 + shard := "0" + require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, shard, numReplicas, 0, tabletID, nil)) + tablets := make(map[string]*cluster.VttabletProcess) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 
numReplicas, shardStatusWaitTimeout)) + tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID)].Vttablet + sqls := strings.Split(FKExtSourceSchema, "\n") + for _, sql := range sqls { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", + "--ddl_strategy=direct", "--sql", sql, keyspaceName) + require.NoErrorf(t, err, output) + } + doReshard(t, fkextConfig.target2KeyspaceName, "reshard3to1", threeShards, "0", tablets) + }) + lg.Stop() + waitForLowLag(t, fkextConfig.target1KeyspaceName, "mat") + t.Run("Validate materialize counts at end of test", func(t *testing.T) { + validateMaterializeRowCounts(t) + }) + +} + +// compareRowCounts compares the row counts for the parent and child tables in the source and target shards. In addition to vdiffs, +// it is another check to ensure that both tables have the same number of rows in the source and target shards after load generation +// has stopped. +func compareRowCounts(t *testing.T, keyspace string, sourceShards, targetShards []string) error { + log.Infof("Comparing row counts for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards) + lg.Stop() + defer lg.Start() + if err := waitForCondition("load generator to stop", func() bool { return lg.State() == LoadGeneratorStateStopped }, 10*time.Second); err != nil { + return err + } + + sourceTabs := make(map[string]*cluster.VttabletProcess) + targetTabs := make(map[string]*cluster.VttabletProcess) + for _, shard := range sourceShards { + sourceTabs[shard] = vc.getPrimaryTablet(t, keyspace, shard) + } + for _, shard := range targetShards { + targetTabs[shard] = vc.getPrimaryTablet(t, keyspace, shard) + } + + getCount := func(tab *cluster.VttabletProcess, table string) (int64, error) { + qr, err := tab.QueryTablet(fmt.Sprintf("select count(*) from %s", table), keyspace, true) + if err != nil { + return 0, err + } + return qr.Rows[0][0].ToInt64() + } + + 
var sourceParentCount, sourceChildCount int64 + var targetParentCount, targetChildCount int64 + for _, tab := range sourceTabs { + count, _ := getCount(tab, "parent") + sourceParentCount += count + count, _ = getCount(tab, "child") + sourceChildCount += count + } + for _, tab := range targetTabs { + count, _ := getCount(tab, "parent") + targetParentCount += count + count, _ = getCount(tab, "child") + targetChildCount += count + } + log.Infof("Source parent count: %d, child count: %d, target parent count: %d, child count: %d.", + sourceParentCount, sourceChildCount, targetParentCount, targetChildCount) + if sourceParentCount != targetParentCount || sourceChildCount != targetChildCount { + return fmt.Errorf(fmt.Sprintf("source and target row counts do not match; source parent count: %d, target parent count: %d, source child count: %d, target child count: %d", + sourceParentCount, targetParentCount, sourceChildCount, targetChildCount)) + } + return nil +} + +func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { + rs := newReshard(vc, &reshardWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: keyspace, + }, + sourceShards: sourceShards, + targetShards: targetShards, + skipSchemaCopy: true, + }, workflowFlavorVtctl) + rs.Create() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + for _, targetTab := range targetTabs { + catchup(t, targetTab, workflowName, "Reshard") + } + vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + rs.SwitchReadsAndWrites() + //if lg.WaitForAdditionalRows(100) != nil { + // t.Fatal("WaitForAdditionalRows failed") + //} + waitForLowLag(t, keyspace, workflowName+"_reverse") + if compareRowCounts(t, keyspace, strings.Split(sourceShards, ","), strings.Split(targetShards, ",")) != nil { + t.Fatal("Row counts do not 
match") + } + vdiff(t, keyspace, workflowName+"_reverse", fkextConfig.cell, true, false, nil) + + rs.ReverseReadsAndWrites() + //if lg.WaitForAdditionalRows(100) != nil { + // t.Fatal("WaitForAdditionalRows failed") + //} + waitForLowLag(t, keyspace, workflowName) + if compareRowCounts(t, keyspace, strings.Split(targetShards, ","), strings.Split(sourceShards, ",")) != nil { + t.Fatal("Row counts do not match") + } + vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + + rs.SwitchReadsAndWrites() + rs.Complete() +} + +func areRowCountsEqual(t *testing.T) bool { + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + parentRowCount := getRowCount(t, vtgateConn, "target2.parent") + childRowCount := getRowCount(t, vtgateConn, "target2.child") + parentCopyRowCount := getRowCount(t, vtgateConn, "target1.parent_copy") + childCopyRowCount := getRowCount(t, vtgateConn, "target1.child_copy") + log.Infof("Post-materialize row counts are parent: %d, child: %d, parent_copy: %d, child_copy: %d", + parentRowCount, childRowCount, parentCopyRowCount, childCopyRowCount) + if parentRowCount != parentCopyRowCount || childRowCount != childCopyRowCount { + return false + } + return true +} + +// validateMaterializeRowCounts expects the Load generator to be stopped before calling it. 
+func validateMaterializeRowCounts(t *testing.T) { + if lg.State() != LoadGeneratorStateStopped { + t.Fatal("Load generator was unexpectedly still running when validateMaterializeRowCounts was called -- this will produce unreliable results.") + } + areRowCountsEqual2 := func() bool { + return areRowCountsEqual(t) + } + require.NoError(t, waitForCondition("row counts to be equal", areRowCountsEqual2, defaultTimeout)) +} + +const fkExtMaterializeSpec = ` +{"workflow": "%s", "source_keyspace": "%s", "target_keyspace": "%s", +"table_settings": [ {"target_table": "parent_copy", "source_expression": "select * from parent" },{"target_table": "child_copy", "source_expression": "select * from child" }], +"tablet_types": "replica"}` + +func materializeTables(t *testing.T) { + wfName := "mat" + err := vc.VtctlClient.ExecuteCommand("ApplySchema", "--", "--ddl_strategy=direct", + "--sql", FKExtMaterializeSchema, fkextConfig.target1KeyspaceName) + require.NoError(t, err, fmt.Sprintf("ApplySchema Error: %s", err)) + materializeSpec := fmt.Sprintf(fkExtMaterializeSpec, "mat", fkextConfig.target2KeyspaceName, fkextConfig.target1KeyspaceName) + err = vc.VtctlClient.ExecuteCommand("Materialize", materializeSpec) + require.NoError(t, err, "Materialize") + tab := vc.getPrimaryTablet(t, fkextConfig.target1KeyspaceName, "0") + catchup(t, tab, wfName, "Materialize") + validateMaterializeRowCounts(t) +} + +func moveKeyspace(t *testing.T) { + targetTabs := newKeyspace(t, fkextConfig.target2KeyspaceName, "-80,80-", FKExtTarget2VSchema, FKExtSourceSchema, 300, 1) + shard := "-80" + tabletId := fmt.Sprintf("%s-%d", fkextConfig.cell, 301) + replicaTab := vc.Cells[fkextConfig.cell].Keyspaces[fkextConfig.target2KeyspaceName].Shards[shard].Tablets[tabletId].Vttablet + dropReplicaConstraints(t, fkextConfig.target2KeyspaceName, replicaTab) + doMoveTables(t, fkextConfig.target1KeyspaceName, fkextConfig.target2KeyspaceName, "move", "replica", targetTabs, false) +} + +func newKeyspace(t *testing.T, 
keyspaceName, shards, vschema, schema string, tabletId, numReplicas int) map[string]*cluster.VttabletProcess { + tablets := make(map[string]*cluster.VttabletProcess) + cellName := fkextConfig.cell + cell := vc.Cells[fkextConfig.cell] + vc.AddKeyspace(t, []*Cell{cell}, keyspaceName, shards, vschema, schema, numReplicas, 0, tabletId, nil) + for i, shard := range strings.Split(shards, ",") { + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, shardStatusWaitTimeout)) + tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletId+i*100)].Vttablet + } + err := vc.VtctldClient.ExecuteCommand("RebuildVSchemaGraph") + require.NoError(t, err) + require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "parent", "id")) + require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "child", "parent_id")) + return tablets +} + +func doMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, tabletTypes string, targetTabs map[string]*cluster.VttabletProcess, atomicCopy bool) { + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + tabletTypes: tabletTypes, + }, + sourceKeyspace: sourceKeyspace, + atomicCopy: atomicCopy, + }, workflowFlavorRandom) + mt.Create() + + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + + for _, targetTab := range targetTabs { + catchup(t, targetTab, workflowName, "MoveTables") + } + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + lg.SetDBStrategy("vtgate", targetKeyspace) + if lg.Start() != nil { + t.Fatal("Start failed") + } + + mt.SwitchReadsAndWrites() + + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + + waitForLowLag(t, sourceKeyspace, 
workflowName+"_reverse") + vdiff(t, sourceKeyspace, workflowName+"_reverse", fkextConfig.cell, false, true, nil) + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + + mt.ReverseReadsAndWrites() + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + waitForLowLag(t, targetKeyspace, workflowName) + time.Sleep(5 * time.Second) + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + mt.SwitchReadsAndWrites() + mt.Complete() + if err := vc.VtctldClient.ExecuteCommand("ApplyRoutingRules", "--rules={}"); err != nil { + t.Fatal(err) + } +} + +func importIntoVitess(t *testing.T) { + targetTabs := newKeyspace(t, fkextConfig.target1KeyspaceName, "0", FKExtTarget1VSchema, FKExtSourceSchema, 200, 1) + doMoveTables(t, fkextConfig.sourceKeyspaceName, fkextConfig.target1KeyspaceName, "import", "primary", targetTabs, true) +} + +const getConstraintsQuery = ` +SELECT CONSTRAINT_NAME, TABLE_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE +WHERE TABLE_SCHEMA = '%s' AND REFERENCED_TABLE_NAME IS NOT NULL; +` + +// dropReplicaConstraints drops all foreign key constraints on replica tables for a given keyspace/shard. +// We do this so that we can replay binlogs from a replica which is not doing cascades but just replaying +// the binlogs created by the primary. This will confirm that vtgate is doing the cascades correctly. 
+func dropReplicaConstraints(t *testing.T, keyspaceName string, tablet *cluster.VttabletProcess) { + var dropConstraints []string + require.Equal(t, "replica", strings.ToLower(tablet.TabletType)) + dbName := "vt_" + keyspaceName + qr, err := tablet.QueryTablet(fmt.Sprintf(getConstraintsQuery, dbName), keyspaceName, true) + if err != nil { + t.Fatal(err) + } + for _, row := range qr.Rows { + constraintName := row[0].ToString() + tableName := row[1].ToString() + dropConstraints = append(dropConstraints, fmt.Sprintf("ALTER TABLE `%s`.`%s` DROP FOREIGN KEY `%s`", + dbName, tableName, constraintName)) + } + prefixQueries := []string{ + "set sql_log_bin=0", + "SET @@global.super_read_only=0", + } + suffixQueries := []string{ + "SET @@global.super_read_only=1", + "set sql_log_bin=1", + } + queries := append(prefixQueries, dropConstraints...) + queries = append(queries, suffixQueries...) + require.NoError(t, tablet.QueryTabletMultiple(queries, keyspaceName, true)) +} diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 31886864f11..06e94e0222d 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -94,12 +94,15 @@ func TestFKWorkflow(t *testing.T) { workflowName := "fk" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, sourceKeyspace: sourceKeyspace, atomicCopy: true, - }, moveTablesFlavorRandom) + }, workflowFlavorRandom) mt.Create() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index d2154f13f1f..07c12caf194 100644 --- 
a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "net/http" + "os" "os/exec" "regexp" "sort" @@ -56,6 +57,11 @@ const ( workflowStateTimeout = 90 * time.Second ) +func setSidecarDBName(dbName string) { + sidecarDBName = dbName + sidecarDBIdentifier = sqlparser.String(sqlparser.NewIdentifierCS(sidecarDBName)) +} + func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines string) { queries := strings.Split(lines, "\n") for _, query := range queries { @@ -65,8 +71,35 @@ func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines execVtgateQuery(t, conn, database, string(query)) } } + +func execQueryWithRetry(t *testing.T, conn *mysql.Conn, query string, timeout time.Duration) *sqltypes.Result { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(defaultTick) + defer ticker.Stop() + + var qr *sqltypes.Result + var err error + for { + qr, err = conn.ExecuteFetch(query, 1000, false) + if err == nil { + return qr + } + select { + case <-ctx.Done(): + require.FailNow(t, fmt.Sprintf("query %q did not succeed before the timeout of %s; last seen result: %v", + query, timeout, qr.Rows)) + case <-ticker.C: + log.Infof("query %q failed with error %v, retrying in %ds", query, err, defaultTick) + } + } +} + func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { qr, err := conn.ExecuteFetch(query, 1000, false) + if err != nil { + log.Errorf("Error executing query: %s: %v", query, err) + } require.NoError(t, err) return qr } @@ -79,7 +112,7 @@ func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { } ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) + require.NoErrorf(t, err, "error connecting to vtgate on %s:%d", hostname, port) return conn } @@ -96,6 +129,19 @@ func execVtgateQuery(t *testing.T, conn 
*mysql.Conn, database string, query stri return qr } +func execVtgateQueryWithRetry(t *testing.T, conn *mysql.Conn, database string, query string, timeout time.Duration) *sqltypes.Result { + if strings.TrimSpace(query) == "" { + return nil + } + if database != "" { + execQuery(t, conn, "use `"+database+"`;") + } + execQuery(t, conn, "begin") + qr := execQueryWithRetry(t, conn, query, timeout) + execQuery(t, conn, "commit") + return qr +} + func checkHealth(t *testing.T, url string) bool { resp, err := http.Get(url) require.NoError(t, err) @@ -703,6 +749,13 @@ func isBinlogRowImageNoBlob(t *testing.T, tablet *cluster.VttabletProcess) bool return mode == "noblob" } +func getRowCount(t *testing.T, vtgateConn *mysql.Conn, table string) int { + query := fmt.Sprintf("select count(*) from %s", table) + qr := execVtgateQuery(t, vtgateConn, "", query) + numRows, _ := qr.Rows[0][0].ToInt() + return numRows +} + const ( loadTestBufferingWindowDurationStr = "30s" loadTestPostBufferingInsertWindow = 60 * time.Second // should be greater than loadTestBufferingWindowDurationStr @@ -820,3 +873,37 @@ func (lg *loadGenerator) waitForCount(want int64) { } } } + +// appendToQueryLog is useful when debugging tests. 
+func appendToQueryLog(msg string) { + file, err := os.OpenFile(queryLog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Errorf("Error opening query log file: %v", err) + return + } + defer file.Close() + if _, err := file.WriteString(msg + "\n"); err != nil { + log.Errorf("Error writing to query log file: %v", err) + } +} + +func waitForCondition(name string, condition func() bool, timeout time.Duration) error { + if condition() { + return nil + } + + ticker := time.NewTicker(tickInterval) + defer ticker.Stop() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + select { + case <-ticker.C: + if condition() { + return nil + } + case <-ctx.Done(): + return fmt.Errorf("%s: waiting for %s", ctx.Err(), name) + } + } +} diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 4e4b7cada97..113587a1669 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -21,7 +21,7 @@ func TestMoveTablesBuffering(t *testing.T) { setupMinimalCustomerKeyspace(t) tables := "loadtest" err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "", false) + tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index 6a1ed92cb9c..f8dc440b62d 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -239,7 +239,7 @@ func (wf *workflow) create() { currentWorkflowType = wrangler.MoveTablesWorkflow sourceShards := strings.Join(wf.options.sourceShards, ",") err 
= tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, - strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", false) + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", defaultWorkflowExecOptions) case "reshard": currentWorkflowType = wrangler.ReshardWorkflow sourceShards := strings.Join(wf.options.sourceShards, ",") @@ -248,7 +248,7 @@ func (wf *workflow) create() { targetShards = sourceShards } err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, - strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, false) + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) default: panic(fmt.Sprintf("unknown workflow type: %s", wf.typ)) } @@ -266,15 +266,15 @@ func (wf *workflow) create() { } func (wf *workflow) switchTraffic() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions)) } func (wf *workflow) reverseTraffic() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", defaultWorkflowExecOptions)) } func (wf *workflow) complete() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionComplete, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, 
wf.toKeyspace, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions)) } // TestPartialMoveTablesWithSequences enhances TestPartialMoveTables by adding an unsharded keyspace which has a @@ -505,7 +505,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { // We switched traffic, so it's the reverse workflow we want to cancel. reverseWf := wf + "_reverse" reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) output, err := tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 5583232fbdc..d9573b50e4a 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -44,13 +44,16 @@ func testCancel(t *testing.T) { table := "customer2" shard := "80-" // start the partial movetables for 80- - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, sourceKeyspace: sourceKeyspace, tables: table, sourceShards: shard, - }, moveTablesFlavorRandom) + }, workflowFlavorRandom) mt.Create() checkDenyList := func(keyspace string, expected bool) { @@ -141,7 +144,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { // start the partial movetables for 80- err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer,loadtest", workflowActionCreate, "", shard, "", false) + "customer,loadtest", workflowActionCreate, "", shard, "", defaultWorkflowExecOptions) 
require.NoError(t, err) var lg *loadGenerator if runWithLoad { // start load after routing rules are set, otherwise we end up with ambiguous tables @@ -214,7 +217,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions)) expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", targetKs, wfName, shard, shard) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -272,7 +275,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. 
- err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "", false) + err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.Error(t, err) // Confirm global routing rules: -80 should still be be routed to customer @@ -285,14 +288,14 @@ func TestPartialMoveTablesBasic(t *testing.T) { ksWf = fmt.Sprintf("%s.%s", targetKs, wfName) // Start the partial movetables for -80, 80- has already been switched err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer,loadtest", workflowActionCreate, "", shard, "", false) + "customer,loadtest", workflowActionCreate, "", shard, "", defaultWorkflowExecOptions) require.NoError(t, err) targetTab2 := vc.getPrimaryTablet(t, targetKs, shard) catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80") vdiffSideBySide(t, ksWf, "") // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions)) expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", targetKs, wfName) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -313,7 +316,7 @@ func TestPartialMoveTablesBasic(t *testing.T) { // We switched traffic, so it's the reverse workflow we want to cancel. 
reverseWf := wf + "_reverse" reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 338310fdf14..401147a3887 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -61,9 +61,18 @@ var ( currentWorkflowType wrangler.VReplicationWorkflowType ) +type workflowExecOptions struct { + deferSecondaryKeys bool + atomicCopy bool +} + +var defaultWorkflowExecOptions = &workflowExecOptions{ + deferSecondaryKeys: true, +} + func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, - "", workflowActionCreate, "", sourceShards, targetShards, false) + "", workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") @@ -78,7 +87,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { tables = tablesToMove } err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "", false) + tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, 
[]*cluster.VttabletProcess{targetTab1}, targetKs, tables) @@ -88,10 +97,12 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", false) + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", defaultWorkflowExecOptions) } -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string, atomicCopy bool) error { +func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, + sourceShards, targetShards string, options *workflowExecOptions) error { + var args []string if currentWorkflowType == wrangler.MoveTablesWorkflow { args = append(args, "MoveTables") @@ -104,7 +115,7 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, if BypassLagCheck { args = append(args, "--max_replication_lag_allowed=2542087h") } - if atomicCopy { + if options.atomicCopy { args = append(args, "--atomic-copy") } switch action { @@ -125,7 +136,8 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, // Test new experimental --defer-secondary-keys flag switch currentWorkflowType { case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow, wrangler.ReshardWorkflow: - if !atomicCopy { + + if !options.atomicCopy && options.deferSecondaryKeys { args = append(args, "--defer-secondary-keys") } args = append(args, "--initialize-target-sequences") // Only used for MoveTables @@ -317,17 +329,17 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 from product to customer using currentWorkflowType = wrangler.MoveTablesWorkflow err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "customer2", workflowActionCreate, "", 
"", "", false) + "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "customer", "wf2") err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionSwitchTraffic, "", "", "", false) + "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionComplete, "", "", "", false) + "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) // sanity check @@ -352,16 +364,16 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 back to product. Note that now the table has an associated sequence err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "customer2", workflowActionCreate, "", "", "", false) + "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "product", "wf3") err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionSwitchTraffic, "", "", "", false) + "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionComplete, "", "", "", false) + "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) // sanity check diff --git a/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql b/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql new file mode 100644 index 00000000000..6af8ca99b94 --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql @@ -0,0 +1,2 @@ 
+create table parent_copy(id int, name varchar(128), primary key(id)) engine=innodb; +create table child_copy(id int, parent_id int, name varchar(128), primary key(id)) engine=innodb; \ No newline at end of file diff --git a/go/test/endtoend/vreplication/schema/fkext/source_schema.sql b/go/test/endtoend/vreplication/schema/fkext/source_schema.sql new file mode 100644 index 00000000000..01b788338b6 --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/source_schema.sql @@ -0,0 +1,2 @@ +create table if not exists parent(id int, name varchar(128), primary key(id)) engine=innodb; +create table if not exists child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; \ No newline at end of file diff --git a/go/test/endtoend/vreplication/schema/fkext/source_vschema.json b/go/test/endtoend/vreplication/schema/fkext/source_vschema.json new file mode 100644 index 00000000000..01cde0d643d --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/source_vschema.json @@ -0,0 +1,6 @@ +{ + "tables": { + "parent": {}, + "child": {} + } +} diff --git a/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json b/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json new file mode 100644 index 00000000000..dc89232fbbb --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json @@ -0,0 +1,28 @@ +{ + "sharded": false, + "foreignKeyMode": "managed", + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + + } +} diff --git a/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json b/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json new file mode 100644 index 00000000000..06e851a9007 --- /dev/null +++ 
b/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json @@ -0,0 +1,43 @@ +{ + "sharded": true, + "foreignKeyMode": "managed", + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + }, + "parent_copy": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child_copy": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + } +} diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 72b09e8fede..eb96985af57 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -323,6 +323,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { // expected number of rows in total (original run and resume) _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "resume", uuid, false) info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) @@ -375,6 +376,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // confirm that the VDiff was retried, able to complete, and we compared the expected // number of rows in total (original run and retry) info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 38ae9273a42..7dbc675886b 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -17,6 +17,7 @@ 
limitations under the License. package vreplication import ( + "context" "fmt" "strings" "testing" @@ -31,7 +32,10 @@ import ( ) const ( - vdiffTimeout = time.Second * 90 // we can leverage auto retry on error with this longer-than-usual timeout + vdiffTimeout = 90 * time.Second // we can leverage auto retry on error with this longer-than-usual timeout + vdiffRetryTimeout = 30 * time.Second + vdiffStatusCheckInterval = 1 * time.Second + vdiffRetryInterval = 5 * time.Second ) var ( @@ -66,6 +70,7 @@ func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *ex // update-table-stats is needed in order to test progress reports. uuid, _ := performVDiff2Action(t, true, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") info := waitForVDiff2ToComplete(t, true, ksWorkflow, cells, uuid, time.Time{}) + require.NotNil(t, info) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) if want != nil { @@ -90,9 +95,10 @@ func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cell ch := make(chan bool) go func() { for { - time.Sleep(1 * time.Second) + time.Sleep(vdiffStatusCheckInterval) _, jsonStr := performVDiff2Action(t, useVtctlclient, ksWorkflow, cells, "show", uuid, false) info = getVDiffInfo(jsonStr) + require.NotNil(t, info) if info.State == "completed" { if !completedAtMin.IsZero() { ca := info.CompletedAt @@ -136,6 +142,7 @@ func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cell case <-ch: return info case <-time.After(vdiffTimeout): + log.Errorf("VDiff never completed for UUID %s", uuid) require.FailNow(t, fmt.Sprintf("VDiff never completed for UUID %s", uuid)) return nil } @@ -153,7 +160,7 @@ func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *e // update-table-stats is needed in order to test progress reports. 
uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, time.Time{}) - + require.NotNil(t, info) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) if want != nil { @@ -186,7 +193,7 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a args = append(args, extraFlags...) } args = append(args, ksWorkflow, action, actionArg) - output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) + output, err = execVDiffWithRetry(t, expectError, false, args) log.Infof("vdiff output: %+v (err: %+v)", output, err) if !expectError { require.Nil(t, err) @@ -211,7 +218,8 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a if actionArg != "" { args = append(args, actionArg) } - output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) + + output, err = execVDiffWithRetry(t, expectError, true, args) log.Infof("vdiff output: %+v (err: %+v)", output, err) if !expectError { require.NoError(t, err) @@ -226,6 +234,79 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a return uuid, output } +// During SwitchTraffic, due to changes in the cluster, vdiff can return transient errors. isVDiffRetryable() is used to +// ignore such errors and retry vdiff expecting the condition to be resolved. +func isVDiffRetryable(str string) bool { + for _, s := range []string{"Error while dialing", "failed to connect"} { + if strings.Contains(str, s) { + return true + } + } + return false +} + +type vdiffResult struct { + output string + err error +} + +// execVDiffWithRetry will ignore transient errors that can occur during workflow state changes. 
+func execVDiffWithRetry(t *testing.T, expectError bool, useVtctldClient bool, args []string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), vdiffRetryTimeout) + defer cancel() + vdiffResultCh := make(chan vdiffResult) + go func() { + var output string + var err error + retry := false + for { + select { + case <-ctx.Done(): + return + default: + } + if retry { + time.Sleep(vdiffRetryInterval) + } + retry = false + if useVtctldClient { + output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) + } else { + output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) + } + if err != nil { + if expectError { + result := vdiffResult{output: output, err: err} + vdiffResultCh <- result + return + } + log.Infof("vdiff error: %s", err) + if isVDiffRetryable(err.Error()) { + retry = true + } else { + result := vdiffResult{output: output, err: err} + vdiffResultCh <- result + return + } + } + if isVDiffRetryable(output) { + retry = true + } + if !retry { + result := vdiffResult{output: output, err: nil} + vdiffResultCh <- result + return + } + } + }() + select { + case <-ctx.Done(): + return "", fmt.Errorf("timed out waiting for vdiff to complete") + case result := <-vdiffResultCh: + return result.output, result.err + } +} + type vdiffInfo struct { Workflow, Keyspace string State, Shards string diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 0f6a9f668d0..434ea6db3e0 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -93,12 +93,16 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { time.Sleep(15 * time.Second) // wait for some rows to be inserted. 
createWorkflow := func(workflowName, tables string) { - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + tabletTypes: "primary", + }, sourceKeyspace: sourceKeyspace, tables: tables, - }, moveTablesFlavorVtctld) + }, workflowFlavorVtctld) mt.Create() waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab, workflowName, "MoveTables") diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go index 6bd0bbb19d8..76a4e627b1b 100644 --- a/go/test/endtoend/vreplication/wrappers_test.go +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -20,28 +20,49 @@ import ( "math/rand" "strconv" + "vitess.io/vitess/go/vt/wrangler" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" ) -type moveTablesFlavor int +type iWorkflow interface { + Create() + Show() + SwitchReads() + SwitchWrites() + SwitchReadsAndWrites() + ReverseReadsAndWrites() + Cancel() + Complete() + Flavor() string +} + +type workflowFlavor int const ( - moveTablesFlavorRandom moveTablesFlavor = iota - moveTablesFlavorVtctl - moveTablesFlavorVtctld + workflowFlavorRandom workflowFlavor = iota + workflowFlavorVtctl + workflowFlavorVtctld ) -var moveTablesFlavors = []moveTablesFlavor{ - moveTablesFlavorVtctl, - moveTablesFlavorVtctld, +var workflowFlavors = []workflowFlavor{ + workflowFlavorVtctl, + workflowFlavorVtctld, } -type moveTables struct { +type workflowInfo struct { vc *VitessCluster workflowName string targetKeyspace string + tabletTypes string +} + +// MoveTables wrappers + +type moveTablesWorkflow struct { + *workflowInfo sourceKeyspace string tables string atomicCopy bool @@ -49,27 +70,19 @@ type moveTables struct { } type 
iMoveTables interface { - Create() - Show() - SwitchReads() - SwitchWrites() - SwitchReadsAndWrites() - ReverseReadsAndWrites() - Cancel() - Complete() - Flavor() string + iWorkflow } -func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) iMoveTables { +func newMoveTables(vc *VitessCluster, mt *moveTablesWorkflow, flavor workflowFlavor) iMoveTables { mt.vc = vc var mt2 iMoveTables - if flavor == moveTablesFlavorRandom { - flavor = moveTablesFlavors[rand.Intn(len(moveTablesFlavors))] + if flavor == workflowFlavorRandom { + flavor = workflowFlavors[rand.Intn(len(workflowFlavors))] } switch flavor { - case moveTablesFlavorVtctl: + case workflowFlavorVtctl: mt2 = newVtctlMoveTables(mt) - case moveTablesFlavorVtctld: + case workflowFlavorVtctld: mt2 = newVtctldMoveTables(mt) default: panic("unreachable") @@ -79,33 +92,31 @@ func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) i } type VtctlMoveTables struct { - *moveTables + *moveTablesWorkflow } func (vmt *VtctlMoveTables) Flavor() string { return "vtctl" } -func newVtctlMoveTables(mt *moveTables) *VtctlMoveTables { +func newVtctlMoveTables(mt *moveTablesWorkflow) *VtctlMoveTables { return &VtctlMoveTables{mt} } func (vmt *VtctlMoveTables) Create() { - log.Infof("vmt is %+v", vmt.vc, vmt.tables) - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionCreate, "", vmt.sourceShards, "", vmt.atomicCopy) - require.NoError(vmt.vc.t, err) + currentWorkflowType = wrangler.MoveTablesWorkflow + vmt.exec(workflowActionCreate) } func (vmt *VtctlMoveTables) SwitchReadsAndWrites() { err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionSwitchTraffic, "", "", "", vmt.atomicCopy) + vmt.tables, workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(vmt.vc.t, err) } func (vmt *VtctlMoveTables) ReverseReadsAndWrites() { 
err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionReverseTraffic, "", "", "", vmt.atomicCopy) + vmt.tables, workflowActionReverseTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(vmt.vc.t, err) } @@ -114,6 +125,15 @@ func (vmt *VtctlMoveTables) Show() { panic("implement me") } +func (vmt *VtctlMoveTables) exec(action string) { + options := &workflowExecOptions{ + deferSecondaryKeys: false, + atomicCopy: vmt.atomicCopy, + } + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, action, vmt.tabletTypes, vmt.sourceShards, "", options) + require.NoError(vmt.vc.t, err) +} func (vmt *VtctlMoveTables) SwitchReads() { //TODO implement me panic("implement me") @@ -125,23 +145,20 @@ func (vmt *VtctlMoveTables) SwitchWrites() { } func (vmt *VtctlMoveTables) Cancel() { - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionCancel, "", "", "", vmt.atomicCopy) - require.NoError(vmt.vc.t, err) + vmt.exec(workflowActionCancel) } func (vmt *VtctlMoveTables) Complete() { - //TODO implement me - panic("implement me") + vmt.exec(workflowActionComplete) } var _ iMoveTables = (*VtctldMoveTables)(nil) type VtctldMoveTables struct { - *moveTables + *moveTablesWorkflow } -func newVtctldMoveTables(mt *moveTables) *VtctldMoveTables { +func newVtctldMoveTables(mt *moveTablesWorkflow) *VtctldMoveTables { return &VtctldMoveTables{mt} } @@ -201,6 +218,158 @@ func (v VtctldMoveTables) Cancel() { } func (v VtctldMoveTables) Complete() { + v.exec("Complete") +} + +// Reshard wrappers + +type reshardWorkflow struct { + *workflowInfo + sourceShards string + targetShards string + skipSchemaCopy bool +} + +type iReshard interface { + iWorkflow +} + +func newReshard(vc *VitessCluster, rs *reshardWorkflow, flavor workflowFlavor) iReshard { + rs.vc = vc + var rs2 iReshard + if flavor == 
workflowFlavorRandom { + flavor = workflowFlavors[rand.Intn(len(workflowFlavors))] + } + switch flavor { + case workflowFlavorVtctl: + rs2 = newVtctlReshard(rs) + case workflowFlavorVtctld: + rs2 = newVtctldReshard(rs) + default: + panic("unreachable") + } + log.Infof("Using reshard flavor: %s", rs2.Flavor()) + return rs2 +} + +type VtctlReshard struct { + *reshardWorkflow +} + +func (vrs *VtctlReshard) Flavor() string { + return "vtctl" +} + +func newVtctlReshard(rs *reshardWorkflow) *VtctlReshard { + return &VtctlReshard{rs} +} + +func (vrs *VtctlReshard) Create() { + currentWorkflowType = wrangler.ReshardWorkflow + vrs.exec(workflowActionCreate) +} + +func (vrs *VtctlReshard) SwitchReadsAndWrites() { + vrs.exec(workflowActionSwitchTraffic) +} + +func (vrs *VtctlReshard) ReverseReadsAndWrites() { + vrs.exec(workflowActionReverseTraffic) +} + +func (vrs *VtctlReshard) Show() { + //TODO implement me + panic("implement me") +} + +func (vrs *VtctlReshard) exec(action string) { + options := &workflowExecOptions{} + err := tstWorkflowExec(vrs.vc.t, "", vrs.workflowName, "", vrs.targetKeyspace, + "", action, vrs.tabletTypes, vrs.sourceShards, vrs.targetShards, options) + require.NoError(vrs.vc.t, err) +} + +func (vrs *VtctlReshard) SwitchReads() { + //TODO implement me + panic("implement me") +} + +func (vrs *VtctlReshard) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (vrs *VtctlReshard) Cancel() { + vrs.exec(workflowActionCancel) +} + +func (vrs *VtctlReshard) Complete() { + vrs.exec(workflowActionComplete) +} + +var _ iReshard = (*VtctldReshard)(nil) + +type VtctldReshard struct { + *reshardWorkflow +} + +func newVtctldReshard(rs *reshardWorkflow) *VtctldReshard { + return &VtctldReshard{rs} +} + +func (v VtctldReshard) Flavor() string { + return "vtctld" +} + +func (v VtctldReshard) exec(args ...string) { + args2 := []string{"Reshard", "--workflow=" + v.workflowName, "--target-keyspace=" + v.targetKeyspace} + args2 = append(args2, args...) 
+ if err := vc.VtctldClient.ExecuteCommand(args2...); err != nil { + v.vc.t.Fatalf("failed to create Reshard workflow: %v", err) + } +} + +func (v VtctldReshard) Create() { + args := []string{"Create"} + if v.sourceShards != "" { + args = append(args, "--source-shards="+v.sourceShards) + } + if v.targetShards != "" { + args = append(args, "--target-shards="+v.targetShards) + } + if v.skipSchemaCopy { + args = append(args, "--skip-schema-copy="+strconv.FormatBool(v.skipSchemaCopy)) + } + v.exec(args...) +} + +func (v VtctldReshard) SwitchReadsAndWrites() { + v.exec("SwitchTraffic") +} + +func (v VtctldReshard) ReverseReadsAndWrites() { + v.exec("ReverseTraffic") +} + +func (v VtctldReshard) Show() { + //TODO implement me + panic("implement me") +} + +func (v VtctldReshard) SwitchReads() { //TODO implement me panic("implement me") } + +func (v VtctldReshard) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (v VtctldReshard) Cancel() { + v.exec("Cancel") +} + +func (v VtctldReshard) Complete() { + v.exec("Complete") +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 3abd24aa0e6..be8876f26d2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -152,8 +152,21 @@ func (vp *vplayer) play(ctx context.Context) error { // The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: // - If set (1), foreign key checks are disabled. // - If unset (0), foreign key checks are enabled. -// updateFKCheck also updates the state for the first row event that this vplayer and hence the connection sees. +// updateFKCheck also updates the state for the first row event that this vplayer, and hence the db connection, sees. 
func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { + mustUpdate := false + if vp.vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { + // If this is an atomic copy, we must update the foreign_key_checks state even when the vplayer runs during + // the copy phase, i.e., for catchup and fastforward. + mustUpdate = true + } else if vp.vr.state == binlogdatapb.VReplicationWorkflowState_Running { + // If the vreplication workflow is in Running state, we must update the foreign_key_checks + // state for all workflow types. + mustUpdate = true + } + if !mustUpdate { + return nil + } dbForeignKeyChecksEnabled := true if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { dbForeignKeyChecksEnabled = false diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index ccbcee9c3b0..e7700e08079 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -1391,8 +1391,8 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error Filter: filter, }) } - log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s", - source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) + log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s for target %s:%s, uid %d", + source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position, ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 5a3031d7307..1bc76a2868a 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -122,6 +122,7 @@ var ( 
"vreplication_v2", "vreplication_partial_movetables_basic", "vreplication_partial_movetables_sequences", + "vreplication_foreign_key_stress", "schemadiff_vrepl", "topo_connection_cache", "vtgate_partial_keyspace", diff --git a/test/config.json b/test/config.json index 66657b4f37e..7aafcaf1a80 100644 --- a/test/config.json +++ b/test/config.json @@ -1220,6 +1220,15 @@ "RetryMax": 1, "Tags": [] }, + "vreplication_foreign_key_stress": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestFKExt"], + "Command": [], + "Manual": false, + "Shard": "vreplication_foreign_key_stress", + "RetryMax": 1, + "Tags": [] + }, "vreplication_across_db_versions": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestV2WorkflowsAcrossDBVersions", "-timeout", "20m"], From 3828d431b6d5966765d944a25ab04c7738412be3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Taylor?= Date: Mon, 27 Nov 2023 13:55:49 +0100 Subject: [PATCH 03/33] plabuilder: use OR for not in comparisons (#14607) --- .../vtgate/queries/subquery/subquery_test.go | 25 +++++++++++++++++-- .../vtgate/planbuilder/operators/subquery.go | 5 +++- .../planbuilder/testdata/filter_cases.json | 6 ++--- .../planbuilder/testdata/tpch_cases.json | 2 +- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go index 371de19dbe6..ae46a99565d 100644 --- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go @@ -19,12 +19,11 @@ package subquery import ( "testing" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -58,6 +57,28 @@ func 
TestSubqueriesHasValues(t *testing.T) { mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) } +func TestNotINQueries(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + + // Tests NOT IN where the RHS contains all rows, some rows and no rows + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (0,1),(1,2),(2,3),(3,4),(4,5),(5,6)") + // no matching rows + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1 WHERE id2 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) + + // some matching rows + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 3) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1 WHERE id2 > 3) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + + // all rows matching + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1) ORDER BY id2`, `[]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1) ORDER BY id2`, `[[INT64(1)]]`) + +} + // Test only supported in >= v16.0.0 func TestSubqueriesExists(t *testing.T) { utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate") diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index ae28dd8d9c6..a401b29074d 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -255,7 +255,10 @@ func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Ope predicates = append(predicates, 
sqlparser.NewArgument(hasValuesArg()), rhsPred) sq.SubqueryValueName = sq.ArgName case opcode.PulloutNotIn: - predicates = append(predicates, sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())), rhsPred) + predicates = append(predicates, &sqlparser.OrExpr{ + Left: sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())), + Right: rhsPred, + }) sq.SubqueryValueName = sq.ArgName case opcode.PulloutValue: predicates = append(predicates, rhsPred) diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 2f8f9d73daa..dc8614a8ba7 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -1966,7 +1966,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1", + "Query": "select id from `user` where not :__sq_has_values or id not in ::__sq1", "Table": "`user`" } ] @@ -2503,7 +2503,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1 and :__sq_has_values1 and id in ::__vals", + "Query": "select id from `user` where (not :__sq_has_values or id not in ::__sq1) and :__sq_has_values1 and id in ::__vals", "Table": "`user`", "Values": [ "::__sq2" @@ -2950,7 +2950,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and not :__sq_has_values and id not in ::__sq1", + "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (not :__sq_has_values or id not in ::__sq1)", "Table": "`user`", "Values": [ "5" diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json 
index 61f602c4e33..a5c144df355 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json @@ -1594,7 +1594,7 @@ "Sharded": true }, "FieldQuery": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where 1 != 1", - "Query": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where not :__sq_has_values and ps_suppkey not in ::__sq1", + "Query": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where not :__sq_has_values or ps_suppkey not in ::__sq1", "Table": "partsupp" } ] From 6f7dfc760b19e1cbe85e28609e6d340beb72346f Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Mon, 27 Nov 2023 14:28:22 +0100 Subject: [PATCH 04/33] evalengine: Fix the min / max calculation for decimals (#14614) Signed-off-by: Dirkjan Bussink --- go/vt/vtgate/evalengine/api_aggregation.go | 2 ++ go/vt/vtgate/evalengine/api_aggregation_test.go | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/go/vt/vtgate/evalengine/api_aggregation.go b/go/vt/vtgate/evalengine/api_aggregation.go index c0d490ced22..05a4f8711ad 100644 --- a/go/vt/vtgate/evalengine/api_aggregation.go +++ b/go/vt/vtgate/evalengine/api_aggregation.go @@ -389,6 +389,7 @@ func (s *aggregationDecimal) Min(value sqltypes.Value) error { } if !s.dec.IsInitialized() || dec.Cmp(s.dec) < 0 { s.dec = dec + s.prec = -dec.Exponent() } return nil } @@ -403,6 +404,7 @@ func (s *aggregationDecimal) Max(value sqltypes.Value) error { } if !s.dec.IsInitialized() || dec.Cmp(s.dec) > 0 { s.dec = dec + s.prec = -dec.Exponent() } return nil } diff --git a/go/vt/vtgate/evalengine/api_aggregation_test.go b/go/vt/vtgate/evalengine/api_aggregation_test.go index aab49541e71..bd3a10547fe 100644 --- a/go/vt/vtgate/evalengine/api_aggregation_test.go +++ b/go/vt/vtgate/evalengine/api_aggregation_test.go @@ -72,6 +72,12 @@ func TestMinMax(t *testing.T) { min: sqltypes.NewVarBinary("a"), max: sqltypes.NewVarBinary("b"), }, + { + type_: 
sqltypes.Decimal, + values: []sqltypes.Value{sqltypes.NewDecimal("1.001"), sqltypes.NewDecimal("2.1")}, + min: sqltypes.NewDecimal("1.001"), + max: sqltypes.NewDecimal("2.1"), + }, { // accent insensitive type_: sqltypes.VarChar, From f8348c78860c962b2f95a4229e56e9ac2d92d332 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Mon, 27 Nov 2023 16:30:35 +0200 Subject: [PATCH 05/33] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612) Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- go/mysql/flavor_mysql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index 3cc2e08e489..be11126ff9c 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -234,7 +234,7 @@ func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication } } - return fmt.Sprintf("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s', %v)", pos, timeoutSeconds), nil + return fmt.Sprintf("SELECT WAIT_FOR_EXECUTED_GTID_SET('%s', %v)", pos, timeoutSeconds), nil } // readBinlogEvent is part of the Flavor interface. 
From 5e23ddc2a6391cc7abec2e37357f7a7c34f88dd5 Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Mon, 27 Nov 2023 15:38:23 +0100 Subject: [PATCH 06/33] evalengine: Handle zero dates correctly (#14610) Signed-off-by: Dirkjan Bussink Signed-off-by: Vicent Marti Co-authored-by: Vicent Marti --- config/mycnf/test-suite.cnf | 9 +-- go/mysql/config/config.go | 3 + go/mysql/endtoend/replication_test.go | 4 + .../vrepl_suite/onlineddl_vrepl_suite_test.go | 6 +- .../vrepl/schemadiff_vrepl_suite_test.go | 6 +- go/vt/sidecardb/sidecardb.go | 3 +- go/vt/vtgate/engine/concatenate.go | 22 +++--- go/vt/vtgate/engine/distinct.go | 5 +- go/vt/vtgate/engine/fake_vcursor_test.go | 5 ++ go/vt/vtgate/engine/hash_join.go | 3 +- go/vt/vtgate/engine/primitive.go | 1 + go/vt/vtgate/evalengine/api_coerce.go | 4 +- go/vt/vtgate/evalengine/api_hash.go | 20 ++--- go/vt/vtgate/evalengine/api_hash_test.go | 16 ++-- go/vt/vtgate/evalengine/compiler.go | 5 +- go/vt/vtgate/evalengine/compiler_asm.go | 46 ++--------- go/vt/vtgate/evalengine/compiler_test.go | 8 ++ go/vt/vtgate/evalengine/eval.go | 12 +-- go/vt/vtgate/evalengine/eval_temporal.go | 76 ++++++++++--------- go/vt/vtgate/evalengine/expr_convert.go | 4 +- go/vt/vtgate/evalengine/expr_env.go | 35 ++++++++- go/vt/vtgate/evalengine/expr_logical.go | 4 +- go/vt/vtgate/evalengine/fn_time.go | 68 ++++++++--------- .../evalengine/integration/comparison_test.go | 5 ++ go/vt/vtgate/evalengine/translate.go | 9 ++- go/vt/vtgate/evalengine/weights.go | 10 +-- go/vt/vtgate/evalengine/weights_test.go | 6 +- go/vt/vtgate/vcursor_impl.go | 12 ++- .../vreplication/framework_test.go | 10 +++ .../vreplication/vcopier_test.go | 20 +++-- 30 files changed, 244 insertions(+), 193 deletions(-) create mode 100644 go/mysql/config/config.go diff --git a/config/mycnf/test-suite.cnf b/config/mycnf/test-suite.cnf index e6d0992f6e6..28f4ac16e0d 100644 --- a/config/mycnf/test-suite.cnf +++ b/config/mycnf/test-suite.cnf @@ -1,5 +1,5 @@ # This sets some unsafe settings 
specifically for -# the test-suite which is currently MySQL 5.7 based +# the test-suite which is currently MySQL 8.0 based # In future it should be renamed testsuite.cnf innodb_buffer_pool_size = 32M @@ -14,13 +14,6 @@ key_buffer_size = 2M sync_binlog=0 innodb_doublewrite=0 -# These two settings are required for the testsuite to pass, -# but enabling them does not spark joy. They should be removed -# in the future. See: -# https://github.com/vitessio/vitess/issues/5396 - -sql_mode = STRICT_TRANS_TABLES - # set a short heartbeat interval in order to detect failures quickly slave_net_timeout = 4 # Disabling `super-read-only`. `test-suite` is mainly used for `vttestserver`. Since `vttestserver` uses a single MySQL for primary and replicas, diff --git a/go/mysql/config/config.go b/go/mysql/config/config.go new file mode 100644 index 00000000000..8abf9d7dc71 --- /dev/null +++ b/go/mysql/config/config.go @@ -0,0 +1,3 @@ +package config + +const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index 0c1fa006347..441209b35f7 100644 --- a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -908,6 +908,10 @@ func TestRowReplicationTypes(t *testing.T) { t.Fatal(err) } defer dConn.Close() + // We have tests for zero dates, so we need to allow that for this session. + if _, err := dConn.ExecuteFetch("SET @@session.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')", 0, false); err != nil { + t.Fatal(err) + } // Set the connection time zone for execution of the // statements to PST. 
That way we're sure to test the diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go index c8b87215036..56818069e05 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -28,6 +28,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" @@ -58,8 +59,7 @@ var ( ) const ( - testDataPath = "testdata" - defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" + testDataPath = "testdata" ) func TestMain(m *testing.M) { @@ -178,7 +178,7 @@ func testSingle(t *testing.T, testName string) { } } - sqlMode := defaultSQLMode + sqlMode := config.DefaultSQLMode if overrideSQLMode, exists := readTestFile(t, testName, "sql_mode"); exists { sqlMode = overrideSQLMode } diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index 2dc79840018..055dc7a1df5 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -53,8 +53,8 @@ var ( ) const ( - testDataPath = "../../onlineddl/vrepl_suite/testdata" - defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" + testDataPath = "../../onlineddl/vrepl_suite/testdata" + sqlModeAllowsZeroDate = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" ) type testTableSchema struct { @@ -202,7 +202,7 @@ func testSingle(t *testing.T, testName string) { return } - sqlModeQuery := fmt.Sprintf("set @@global.sql_mode='%s'", defaultSQLMode) + sqlModeQuery := fmt.Sprintf("set 
@@global.sql_mode='%s'", sqlModeAllowsZeroDate) _ = mysqlExec(t, sqlModeQuery, "") _ = mysqlExec(t, "set @@global.event_scheduler=0", "") diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index 92d416f9d37..4b8c37039d7 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/history" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql/fakesqldb" @@ -485,7 +486,7 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { sqlModeResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "sql_mode", "varchar"), - "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION", + config.DefaultSQLMode, ) db.AddQuery("select @@session.sql_mode as sql_mode", sqlModeResult) diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go index 1e8cb655547..27b35c32aa8 100644 --- a/go/vt/vtgate/engine/concatenate.go +++ b/go/vt/vtgate/engine/concatenate.go @@ -105,7 +105,7 @@ func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars err = c.coerceAndVisitResults(res, fields, func(result *sqltypes.Result) error { rows = append(rows, result.Rows...) 
return nil - }) + }, evalengine.ParseSQLMode(vcursor.SQLMode())) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars }, nil } -func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field) error { +func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field, sqlmode evalengine.SQLMode) error { if len(row) != len(fields) { return errWrongNumberOfColumnsInSelect } @@ -126,7 +126,7 @@ func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field) continue } if fields[i].Type != value.Type() { - newValue, err := evalengine.CoerceTo(value, fields[i].Type) + newValue, err := evalengine.CoerceTo(value, fields[i].Type, sqlmode) if err != nil { return err } @@ -228,16 +228,17 @@ func (c *Concatenate) sequentialExec(ctx context.Context, vcursor VCursor, bindV // TryStreamExecute performs a streaming exec. func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool, callback func(*sqltypes.Result) error) error { + sqlmode := evalengine.ParseSQLMode(vcursor.SQLMode()) if vcursor.Session().InTransaction() { // as we are in a transaction, we need to execute all queries inside a single connection, // which holds the single transaction we have - return c.sequentialStreamExec(ctx, vcursor, bindVars, callback) + return c.sequentialStreamExec(ctx, vcursor, bindVars, callback, sqlmode) } // not in transaction, so execute in parallel. 
- return c.parallelStreamExec(ctx, vcursor, bindVars, callback) + return c.parallelStreamExec(ctx, vcursor, bindVars, callback, sqlmode) } -func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, in func(*sqltypes.Result) error) error { +func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, in func(*sqltypes.Result) error, sqlmode evalengine.SQLMode) error { // Scoped context; any early exit triggers cancel() to clean up ongoing work. ctx, cancel := context.WithCancel(inCtx) defer cancel() @@ -271,7 +272,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, // Apply type coercion if needed. if needsCoercion { for _, row := range res.Rows { - if err := c.coerceValuesTo(row, fields); err != nil { + if err := c.coerceValuesTo(row, fields, sqlmode); err != nil { return err } } @@ -340,7 +341,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, return wg.Wait() } -func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error, sqlmode evalengine.SQLMode) error { // all the below fields ensure that the fields are sent only once. 
results := make([][]*sqltypes.Result, len(c.Sources)) @@ -374,7 +375,7 @@ func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, return err } for _, res := range results { - if err = c.coerceAndVisitResults(res, fields, callback); err != nil { + if err = c.coerceAndVisitResults(res, fields, callback, sqlmode); err != nil { return err } } @@ -386,6 +387,7 @@ func (c *Concatenate) coerceAndVisitResults( res []*sqltypes.Result, fields []*querypb.Field, callback func(*sqltypes.Result) error, + sqlmode evalengine.SQLMode, ) error { for _, r := range res { if len(r.Rows) > 0 && @@ -402,7 +404,7 @@ func (c *Concatenate) coerceAndVisitResults( } if needsCoercion { for _, row := range r.Rows { - err := c.coerceValuesTo(row, fields) + err := c.coerceValuesTo(row, fields, sqlmode) if err != nil { return err } diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index 2d263464a2e..c7d6742c136 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -46,6 +46,7 @@ type ( probeTable struct { seenRows map[evalengine.HashCode][]sqltypes.Row checkCols []CheckCol + sqlmode evalengine.SQLMode } ) @@ -119,14 +120,14 @@ func (pt *probeTable) hashCodeForRow(inputRow sqltypes.Row) (evalengine.HashCode return 0, vterrors.VT13001("index out of range in row when creating the DISTINCT hash code") } col := inputRow[checkCol.Col] - hashcode, err := evalengine.NullsafeHashcode(col, checkCol.Type.Collation(), col.Type()) + hashcode, err := evalengine.NullsafeHashcode(col, checkCol.Type.Collation(), col.Type(), pt.sqlmode) if err != nil { if err != evalengine.UnsupportedCollationHashError || checkCol.WsCol == nil { return 0, err } checkCol = checkCol.SwitchToWeightString() pt.checkCols[i] = checkCol - hashcode, err = evalengine.NullsafeHashcode(inputRow[checkCol.Col], checkCol.Type.Collation(), col.Type()) + hashcode, err = evalengine.NullsafeHashcode(inputRow[checkCol.Col], checkCol.Type.Collation(), col.Type(), 
pt.sqlmode) if err != nil { return 0, err } diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index 0ee95a72e60..f9cedd74bfc 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -31,6 +31,7 @@ import ( "github.com/google/go-cmp/cmp" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/key" @@ -135,6 +136,10 @@ func (t *noopVCursor) TimeZone() *time.Location { return nil } +func (t *noopVCursor) SQLMode() string { + return config.DefaultSQLMode +} + func (t *noopVCursor) ExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { return primitive.TryExecute(ctx, t, bindVars, wantfields) } diff --git a/go/vt/vtgate/engine/hash_join.go b/go/vt/vtgate/engine/hash_join.go index 4f205f1bcdc..4e305e8b59d 100644 --- a/go/vt/vtgate/engine/hash_join.go +++ b/go/vt/vtgate/engine/hash_join.go @@ -75,6 +75,7 @@ type ( lhsKey, rhsKey int cols []int hasher vthash.Hasher + sqlmode evalengine.SQLMode } probeTableEntry struct { @@ -283,7 +284,7 @@ func (pt *hashJoinProbeTable) addLeftRow(r sqltypes.Row) error { } func (pt *hashJoinProbeTable) hash(val sqltypes.Value) (vthash.Hash, error) { - err := evalengine.NullsafeHashcode128(&pt.hasher, val, pt.coll, pt.typ) + err := evalengine.NullsafeHashcode128(&pt.hasher, val, pt.coll, pt.typ, pt.sqlmode) if err != nil { return vthash.Hash{}, err } diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index dc1259e2267..833f4cc3b45 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -88,6 +88,7 @@ type ( ConnCollation() collations.ID TimeZone() *time.Location + SQLMode() string ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, lockFuncType 
sqlparser.LockingFuncType) (*sqltypes.Result, error) diff --git a/go/vt/vtgate/evalengine/api_coerce.go b/go/vt/vtgate/evalengine/api_coerce.go index 143d22ab78c..89b36458198 100644 --- a/go/vt/vtgate/evalengine/api_coerce.go +++ b/go/vt/vtgate/evalengine/api_coerce.go @@ -23,8 +23,8 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -func CoerceTo(value sqltypes.Value, typ sqltypes.Type) (sqltypes.Value, error) { - cast, err := valueToEvalCast(value, value.Type(), collations.Unknown) +func CoerceTo(value sqltypes.Value, typ sqltypes.Type, sqlmode SQLMode) (sqltypes.Value, error) { + cast, err := valueToEvalCast(value, value.Type(), collations.Unknown, sqlmode) if err != nil { return sqltypes.Value{}, err } diff --git a/go/vt/vtgate/evalengine/api_hash.go b/go/vt/vtgate/evalengine/api_hash.go index 209f766840d..3bce100839c 100644 --- a/go/vt/vtgate/evalengine/api_hash.go +++ b/go/vt/vtgate/evalengine/api_hash.go @@ -34,8 +34,8 @@ type HashCode = uint64 // NullsafeHashcode returns an int64 hashcode that is guaranteed to be the same // for two values that are considered equal by `NullsafeCompare`. -func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type) (HashCode, error) { - e, err := valueToEvalCast(v, coerceType, collation) +func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type, sqlmode SQLMode) (HashCode, error) { + e, err := valueToEvalCast(v, coerceType, collation, sqlmode) if err != nil { return 0, err } @@ -75,7 +75,7 @@ var ErrHashCoercionIsNotExact = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, " // for two values that are considered equal by `NullsafeCompare`. // This can be used to avoid having to do comparison checks after a hash, // since we consider the 128 bits of entropy enough to guarantee uniqueness. 
-func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { +func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type, sqlmode SQLMode) error { switch { case v.IsNull(), sqltypes.IsNull(coerceTo): hash.Write16(hashPrefixNil) @@ -97,7 +97,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case v.IsText(), v.IsBinary(): f, _ = fastparse.ParseFloat64(v.RawStr()) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode) } if err != nil { return err @@ -137,7 +137,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat } neg = i < 0 default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode) } if err != nil { return err @@ -180,7 +180,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat u, err = uint64(fval), nil } default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode) } if err != nil { return err @@ -223,20 +223,20 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode) } hash.Write16(hashPrefixDecimal) dec.Hash(hash) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode) } return nil } -func nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { +func 
nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type, sqlmode SQLMode) error { // Slow path to handle all other types. This uses the generic // logic for value casting to ensure we match MySQL here. - e, err := valueToEvalCast(v, coerceTo, collation) + e, err := valueToEvalCast(v, coerceTo, collation, sqlmode) if err != nil { return err } diff --git a/go/vt/vtgate/evalengine/api_hash_test.go b/go/vt/vtgate/evalengine/api_hash_test.go index 0add16de89d..c1e5d880bdd 100644 --- a/go/vt/vtgate/evalengine/api_hash_test.go +++ b/go/vt/vtgate/evalengine/api_hash_test.go @@ -56,10 +56,10 @@ func TestHashCodes(t *testing.T) { require.NoError(t, err) require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) - h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type(), 0) require.NoError(t, err) - h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type(), 0) require.ErrorIs(t, err, tc.err) assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) @@ -82,9 +82,9 @@ func TestHashCodesRandom(t *testing.T) { typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) - hash1, err := NullsafeHashcode(v1, collation, typ) + hash1, err := NullsafeHashcode(v1, collation, typ, 0) require.NoError(t, err) - hash2, err := NullsafeHashcode(v2, collation, typ) + hash2, err := NullsafeHashcode(v2, collation, typ, 0) require.NoError(t, err) if cmp == 0 { equal++ @@ -142,11 +142,11 @@ func TestHashCodes128(t *testing.T) { require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 
0).Operator(), tc.dynamic, equality(tc.equal)) hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type(), 0) require.NoError(t, err) hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type(), 0) require.ErrorIs(t, err, tc.err) h1 := hasher1.Sum128() @@ -172,10 +172,10 @@ func TestHashCodesRandom128(t *testing.T) { require.NoError(t, err) hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, v1, collation, typ) + err = NullsafeHashcode128(&hasher1, v1, collation, typ, 0) require.NoError(t, err) hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, v2, collation, typ) + err = NullsafeHashcode128(&hasher2, v2, collation, typ, 0) require.NoError(t, err) if cmp == 0 { equal++ diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go index e7d0e962fab..d757b3c3192 100644 --- a/go/vt/vtgate/evalengine/compiler.go +++ b/go/vt/vtgate/evalengine/compiler.go @@ -33,6 +33,7 @@ type compiler struct { collation collations.ID dynamicTypes []ctype asm assembler + sqlmode SQLMode } type CompilerLog interface { @@ -257,7 +258,7 @@ func (c *compiler) compileToDate(doct ctype, offset int) ctype { case sqltypes.Date: return doct default: - c.asm.Convert_xD(offset) + c.asm.Convert_xD(offset, c.sqlmode.AllowZeroDate()) } return ctype{Type: sqltypes.Date, Col: collationBinary, Flag: flagNullable} } @@ -268,7 +269,7 @@ func (c *compiler) compileToDateTime(doct ctype, offset, prec int) ctype { c.asm.Convert_tp(offset, prec) return doct default: - c.asm.Convert_xDT(offset, prec) + c.asm.Convert_xDT(offset, prec, c.sqlmode.AllowZeroDate()) } return ctype{Type: sqltypes.Datetime, Size: int32(prec), Col: collationBinary, Flag: 
flagNullable} } diff --git a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go index 19d4c01a399..cbf9df9c57e 100644 --- a/go/vt/vtgate/evalengine/compiler_asm.go +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -516,7 +516,7 @@ func (asm *assembler) Cmp_ne_n() { }, "CMPFLAG NE [NULL]") } -func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc collations.TypedCollation) { +func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc collations.TypedCollation, allowZeroDate bool) { elseOffset := 0 if hasElse { elseOffset = 1 @@ -529,12 +529,12 @@ func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc coll end := env.vm.sp - elseOffset for sp := env.vm.sp - stackDepth; sp < end; sp += 2 { if env.vm.stack[sp] != nil && env.vm.stack[sp].(*evalInt64).i != 0 { - env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[sp+1], tt, cc.Collation, env.now) + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[sp+1], tt, cc.Collation, env.now, allowZeroDate) goto done } } if elseOffset != 0 { - env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[env.vm.sp-1], tt, cc.Collation, env.now) + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[env.vm.sp-1], tt, cc.Collation, env.now, allowZeroDate) } else { env.vm.stack[env.vm.sp-stackDepth] = nil } @@ -1126,12 +1126,12 @@ func (asm *assembler) Convert_xu(offset int) { }, "CONV (SP-%d), UINT64", offset) } -func (asm *assembler) Convert_xD(offset int) { +func (asm *assembler) Convert_xD(offset int, allowZero bool) { asm.emit(func(env *ExpressionEnv) int { // Need to explicitly check here or we otherwise // store a nil wrapper in an interface vs. a direct // nil. 
- d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now) + d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now, allowZero) if d == nil { env.vm.stack[env.vm.sp-offset] = nil } else { @@ -1141,27 +1141,12 @@ func (asm *assembler) Convert_xD(offset int) { }, "CONV (SP-%d), DATE", offset) } -func (asm *assembler) Convert_xD_nz(offset int) { +func (asm *assembler) Convert_xDT(offset, prec int, allowZero bool) { asm.emit(func(env *ExpressionEnv) int { // Need to explicitly check here or we otherwise // store a nil wrapper in an interface vs. a direct // nil. - d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now) - if d == nil || d.isZero() { - env.vm.stack[env.vm.sp-offset] = nil - } else { - env.vm.stack[env.vm.sp-offset] = d - } - return 1 - }, "CONV (SP-%d), DATE(NOZERO)", offset) -} - -func (asm *assembler) Convert_xDT(offset, prec int) { - asm.emit(func(env *ExpressionEnv) int { - // Need to explicitly check here or we otherwise - // store a nil wrapper in an interface vs. a direct - // nil. - dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now) + dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now, allowZero) if dt == nil { env.vm.stack[env.vm.sp-offset] = nil } else { @@ -1171,21 +1156,6 @@ func (asm *assembler) Convert_xDT(offset, prec int) { }, "CONV (SP-%d), DATETIME", offset) } -func (asm *assembler) Convert_xDT_nz(offset, prec int) { - asm.emit(func(env *ExpressionEnv) int { - // Need to explicitly check here or we otherwise - // store a nil wrapper in an interface vs. a direct - // nil. 
- dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now) - if dt == nil || dt.isZero() { - env.vm.stack[env.vm.sp-offset] = nil - } else { - env.vm.stack[env.vm.sp-offset] = dt - } - return 1 - }, "CONV (SP-%d), DATETIME(NOZERO)", offset) -} - func (asm *assembler) Convert_xT(offset, prec int) { asm.emit(func(env *ExpressionEnv) int { t := evalToTime(env.vm.stack[env.vm.sp-offset], prec) @@ -4189,7 +4159,7 @@ func (asm *assembler) Fn_DATEADD_s(unit datetime.IntervalType, sub bool, col col goto baddate } - tmp = evalToTemporal(env.vm.stack[env.vm.sp-2]) + tmp = evalToTemporal(env.vm.stack[env.vm.sp-2], true) if tmp == nil { goto baddate } diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index 77174a01d71..e7b51b41748 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -564,6 +564,14 @@ func TestCompilerSingle(t *testing.T) { expression: `case when null is null then 23 else null end`, result: `INT64(23)`, }, + { + expression: `CAST(0 AS DATE)`, + result: `NULL`, + }, + { + expression: `DAYOFMONTH(0)`, + result: `INT64(0)`, + }, } tz, _ := time.LoadLocation("Europe/Madrid") diff --git a/go/vt/vtgate/evalengine/eval.go b/go/vt/vtgate/evalengine/eval.go index 82f1ec688c7..33312cddc5f 100644 --- a/go/vt/vtgate/evalengine/eval.go +++ b/go/vt/vtgate/evalengine/eval.go @@ -176,7 +176,7 @@ func evalIsTruthy(e eval) boolean { } } -func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time) (eval, error) { +func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time, allowZero bool) (eval, error) { if e == nil { return nil, nil } @@ -208,9 +208,9 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time) (ev case sqltypes.Uint8, sqltypes.Uint16, sqltypes.Uint32, sqltypes.Uint64: return evalToInt64(e).toUint64(), nil case sqltypes.Date: - return evalToDate(e, now), nil + return evalToDate(e, now, allowZero), nil 
case sqltypes.Datetime, sqltypes.Timestamp: - return evalToDateTime(e, -1, now), nil + return evalToDateTime(e, -1, now, allowZero), nil case sqltypes.Time: return evalToTime(e, -1), nil default: @@ -218,7 +218,7 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time) (ev } } -func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID) (eval, error) { +func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID, sqlmode SQLMode) (eval, error) { switch { case typ == sqltypes.Null: return nil, nil @@ -338,7 +338,7 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I return nil, err } // Separate return here to avoid nil wrapped in interface type - d := evalToDate(e, time.Now()) + d := evalToDate(e, time.Now(), sqlmode.AllowZeroDate()) if d == nil { return nil, nil } @@ -349,7 +349,7 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I return nil, err } // Separate return here to avoid nil wrapped in interface type - dt := evalToDateTime(e, -1, time.Now()) + dt := evalToDateTime(e, -1, time.Now(), sqlmode.AllowZeroDate()) if dt == nil { return nil, nil } diff --git a/go/vt/vtgate/evalengine/eval_temporal.go b/go/vt/vtgate/evalengine/eval_temporal.go index d44839a6853..7952fc5aa5d 100644 --- a/go/vt/vtgate/evalengine/eval_temporal.go +++ b/go/vt/vtgate/evalengine/eval_temporal.go @@ -164,11 +164,17 @@ func (e *evalTemporal) addInterval(interval *datetime.Interval, coll collations. 
return tmp } -func newEvalDateTime(dt datetime.DateTime, l int) *evalTemporal { +func newEvalDateTime(dt datetime.DateTime, l int, allowZero bool) *evalTemporal { + if !allowZero && dt.IsZero() { + return nil + } return &evalTemporal{t: sqltypes.Datetime, dt: dt.Round(l), prec: uint8(l)} } -func newEvalDate(d datetime.Date) *evalTemporal { +func newEvalDate(d datetime.Date, allowZero bool) *evalTemporal { + if !allowZero && d.IsZero() { + return nil + } return &evalTemporal{t: sqltypes.Date, dt: datetime.DateTime{Date: d}} } @@ -185,7 +191,7 @@ func parseDate(s []byte) (*evalTemporal, error) { if !ok { return nil, errIncorrectTemporal("DATE", s) } - return newEvalDate(t), nil + return newEvalDate(t, true), nil } func parseDateTime(s []byte) (*evalTemporal, error) { @@ -193,7 +199,7 @@ func parseDateTime(s []byte) (*evalTemporal, error) { if !ok { return nil, errIncorrectTemporal("DATETIME", s) } - return newEvalDateTime(t, l), nil + return newEvalDateTime(t, l, true), nil } func parseTime(s []byte) (*evalTemporal, error) { @@ -211,56 +217,56 @@ func precision(req, got int) int { return req } -func evalToTemporal(e eval) *evalTemporal { +func evalToTemporal(e eval, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e case *evalBytes: if t, l, ok := datetime.ParseDateTime(e.string(), -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDate(e.string()); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, l, ok := datetime.ParseTime(e.string(), -1); ok { return newEvalTime(t, l) } case *evalInt64: if t, ok := datetime.ParseDateTimeInt64(e.i); ok { - return newEvalDateTime(t, 0) + return newEvalDateTime(t, 0, allowZero) } if d, ok := datetime.ParseDateInt64(e.i); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, ok := datetime.ParseTimeInt64(e.i); ok { return newEvalTime(t, 0) } case *evalUint64: if t, ok := 
datetime.ParseDateTimeInt64(int64(e.u)); ok { - return newEvalDateTime(t, 0) + return newEvalDateTime(t, 0, allowZero) } if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, ok := datetime.ParseTimeInt64(int64(e.u)); ok { return newEvalTime(t, 0) } case *evalFloat: if t, l, ok := datetime.ParseDateTimeFloat(e.f, -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateFloat(e.f); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, l, ok := datetime.ParseTimeFloat(e.f, -1); ok { return newEvalTime(t, l) } case *evalDecimal: if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateDecimal(e.dec); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if d, l, ok := datetime.ParseTimeDecimal(e.dec, e.length, -1); ok { return newEvalTime(d, l) @@ -271,9 +277,9 @@ func evalToTemporal(e eval) *evalTemporal { return newEvalTime(dt.Time, datetime.DefaultPrecision) } if dt.Time.IsZero() { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } - return newEvalDateTime(dt, datetime.DefaultPrecision) + return newEvalDateTime(dt, datetime.DefaultPrecision, allowZero) } } return nil @@ -326,74 +332,74 @@ func evalToTime(e eval, l int) *evalTemporal { return nil } -func evalToDateTime(e eval, l int, now time.Time) *evalTemporal { +func evalToDateTime(e eval, l int, now time.Time, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e.toDateTime(precision(l, int(e.prec)), now) case *evalBytes: if t, l, _ := datetime.ParseDateTime(e.string(), l); !t.IsZero() { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, _ := datetime.ParseDate(e.string()); !d.IsZero() { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + 
return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalInt64: if t, ok := datetime.ParseDateTimeInt64(e.i); ok { - return newEvalDateTime(t, precision(l, 0)) + return newEvalDateTime(t, precision(l, 0), allowZero) } if d, ok := datetime.ParseDateInt64(e.i); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalUint64: if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { - return newEvalDateTime(t, precision(l, 0)) + return newEvalDateTime(t, precision(l, 0), allowZero) } if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalFloat: if t, l, ok := datetime.ParseDateTimeFloat(e.f, l); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateFloat(e.f); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalDecimal: if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, l); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateDecimal(e.dec); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalJSON: if dt, ok := e.DateTime(); ok { - return newEvalDateTime(dt, precision(l, datetime.DefaultPrecision)) + return newEvalDateTime(dt, precision(l, datetime.DefaultPrecision), allowZero) } } return nil } -func evalToDate(e eval, now time.Time) *evalTemporal { +func evalToDate(e eval, now time.Time, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e.toDate(now) case *evalBytes: if t, _ := 
datetime.ParseDate(e.string()); !t.IsZero() { - return newEvalDate(t) + return newEvalDate(t, allowZero) } if dt, _, _ := datetime.ParseDateTime(e.string(), -1); !dt.IsZero() { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } case evalNumeric: if t, ok := datetime.ParseDateInt64(e.toInt64().i); ok { - return newEvalDate(t) + return newEvalDate(t, allowZero) } if dt, ok := datetime.ParseDateTimeInt64(e.toInt64().i); ok { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } case *evalJSON: if d, ok := e.Date(); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } } return nil diff --git a/go/vt/vtgate/evalengine/expr_convert.go b/go/vt/vtgate/evalengine/expr_convert.go index 900d4e37f8f..5b2d82b707f 100644 --- a/go/vt/vtgate/evalengine/expr_convert.go +++ b/go/vt/vtgate/evalengine/expr_convert.go @@ -124,12 +124,12 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { case p > 6: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. 
Maximum is 6.", p) } - if dt := evalToDateTime(e, c.Length, env.now); dt != nil { + if dt := evalToDateTime(e, c.Length, env.now, env.sqlmode.AllowZeroDate()); dt != nil { return dt, nil } return nil, nil case "DATE": - if d := evalToDate(e, env.now); d != nil { + if d := evalToDate(e, env.now, env.sqlmode.AllowZeroDate()); d != nil { return d, nil } return nil, nil diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index a6ca1411e74..1c92b0a45ee 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -30,6 +30,7 @@ import ( type VCursor interface { TimeZone() *time.Location GetKeyspace() string + SQLMode() string } type ( @@ -43,9 +44,10 @@ type ( Fields []*querypb.Field // internal state - now time.Time - vc VCursor - user *querypb.VTGateCallerID + now time.Time + vc VCursor + user *querypb.VTGateCallerID + sqlmode SQLMode } ) @@ -121,5 +123,32 @@ func NewExpressionEnv(ctx context.Context, bindVars map[string]*querypb.BindVari env := &ExpressionEnv{BindVars: bindVars, vc: vc} env.user = callerid.ImmediateCallerIDFromContext(ctx) env.SetTime(time.Now()) + if vc != nil { + env.sqlmode = ParseSQLMode(vc.SQLMode()) + } return env } + +const ( + sqlModeParsed = 1 << iota + sqlModeNoZeroDate +) + +type SQLMode uint32 + +func (mode SQLMode) AllowZeroDate() bool { + if mode == 0 { + // default: do not allow zero-date if the sqlmode is not set + return false + } + return (mode & sqlModeNoZeroDate) == 0 +} + +func ParseSQLMode(sqlmode string) SQLMode { + var mode SQLMode + if strings.Contains(sqlmode, "NO_ZERO_DATE") { + mode |= sqlModeNoZeroDate + } + mode |= sqlModeParsed + return mode +} diff --git a/go/vt/vtgate/evalengine/expr_logical.go b/go/vt/vtgate/evalengine/expr_logical.go index f22ca091acb..7fe836d7164 100644 --- a/go/vt/vtgate/evalengine/expr_logical.go +++ b/go/vt/vtgate/evalengine/expr_logical.go @@ -633,7 +633,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { if 
!matched { return nil, nil } - return evalCoerce(result, ta.result(), ca.result().Collation, env.now) + return evalCoerce(result, ta.result(), ca.result().Collation, env.now, env.sqlmode.AllowZeroDate()) } func (c *CaseExpr) constant() bool { @@ -716,7 +716,7 @@ func (cs *CaseExpr) compile(c *compiler) (ctype, error) { f |= flagNullable } ct := ctype{Type: ta.result(), Flag: f, Col: ca.result()} - c.asm.CmpCase(len(cs.cases), cs.Else != nil, ct.Type, ct.Col) + c.asm.CmpCase(len(cs.cases), cs.Else != nil, ct.Type, ct.Col, c.sqlmode.AllowZeroDate()) return ct, nil } diff --git a/go/vt/vtgate/evalengine/fn_time.go b/go/vt/vtgate/evalengine/fn_time.go index b0e7366ce8f..49a328a852f 100644 --- a/go/vt/vtgate/evalengine/fn_time.go +++ b/go/vt/vtgate/evalengine/fn_time.go @@ -266,7 +266,7 @@ func (b *builtinDateFormat) eval(env *ExpressionEnv) (eval, error) { case *evalTemporal: t = e.toDateTime(datetime.DefaultPrecision, env.now) default: - t = evalToDateTime(date, datetime.DefaultPrecision, env.now) + t = evalToDateTime(date, datetime.DefaultPrecision, env.now, env.sqlmode.AllowZeroDate()) if t == nil || t.isZero() { return nil, nil } @@ -291,7 +291,7 @@ func (call *builtinDateFormat) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Datetime, sqltypes.Date: default: - c.asm.Convert_xDT_nz(1, datetime.DefaultPrecision) + c.asm.Convert_xDT(1, datetime.DefaultPrecision, false) } format, err := call.Arguments[1].compile(c) @@ -359,7 +359,7 @@ func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - dt := evalToDateTime(n, -1, env.now) + dt := evalToDateTime(n, -1, env.now, env.sqlmode.AllowZeroDate()) if dt == nil || dt.isZero() { return nil, nil } @@ -368,7 +368,7 @@ func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { if !ok { return nil, nil } - return newEvalDateTime(out, int(dt.prec)), nil + return newEvalDateTime(out, int(dt.prec), env.sqlmode.AllowZeroDate()), nil } func (call *builtinConvertTz) 
compile(c *compiler) (ctype, error) { @@ -402,7 +402,7 @@ func (call *builtinConvertTz) compile(c *compiler) (ctype, error) { switch n.Type { case sqltypes.Datetime, sqltypes.Date: default: - c.asm.Convert_xDT_nz(3, -1) + c.asm.Convert_xDT(3, -1, false) } c.asm.Fn_CONVERT_TZ() @@ -418,7 +418,7 @@ func (b *builtinDate) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil { return nil, nil } @@ -436,7 +436,7 @@ func (call *builtinDate) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, c.sqlmode.AllowZeroDate()) } c.asm.jumpDestination(skip) @@ -451,7 +451,7 @@ func (b *builtinDayOfMonth) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -469,7 +469,7 @@ func (call *builtinDayOfMonth) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_DAYOFMONTH() c.asm.jumpDestination(skip) @@ -484,7 +484,7 @@ func (b *builtinDayOfWeek) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || d.isZero() { return nil, nil } @@ -502,7 +502,7 @@ func (call *builtinDayOfWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_DAYOFWEEK() c.asm.jumpDestination(skip) @@ -517,7 +517,7 @@ func (b *builtinDayOfYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || 
d.isZero() { return nil, nil } @@ -535,7 +535,7 @@ func (call *builtinDayOfYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_DAYOFYEAR() c.asm.jumpDestination(skip) @@ -610,7 +610,7 @@ func (b *builtinFromUnixtime) eval(env *ExpressionEnv) (eval, error) { t = t.In(tz) } - dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec) + dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec, env.sqlmode.AllowZeroDate()) if len(b.Arguments) == 1 { return dt, nil @@ -761,7 +761,7 @@ func (b *builtinMakedate) eval(env *ExpressionEnv) (eval, error) { if t.IsZero() { return nil, nil } - return newEvalDate(datetime.NewDateTimeFromStd(t).Date), nil + return newEvalDate(datetime.NewDateTimeFromStd(t).Date, env.sqlmode.AllowZeroDate()), nil } func (call *builtinMakedate) compile(c *compiler) (ctype, error) { @@ -1102,7 +1102,7 @@ func (b *builtinMonth) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1120,7 +1120,7 @@ func (call *builtinMonth) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_MONTH() c.asm.jumpDestination(skip) @@ -1135,7 +1135,7 @@ func (b *builtinMonthName) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil { return nil, nil } @@ -1158,7 +1158,7 @@ func (call *builtinMonthName) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, c.sqlmode.AllowZeroDate()) } col := typedCoercionCollation(sqltypes.VarChar, call.collate) c.asm.Fn_MONTHNAME(col) @@ -1174,7 +1174,7 @@ 
func (b *builtinQuarter) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1192,7 +1192,7 @@ func (call *builtinQuarter) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_QUARTER() c.asm.jumpDestination(skip) @@ -1270,7 +1270,7 @@ func dateTimeUnixTimestamp(env *ExpressionEnv, date eval) evalNumeric { case *evalTemporal: dt = e.toDateTime(int(e.prec), env.now) default: - dt = evalToDateTime(date, -1, env.now) + dt = evalToDateTime(date, -1, env.now, env.sqlmode.AllowZeroDate()) if dt == nil || dt.isZero() { var prec int32 switch d := date.(type) { @@ -1351,7 +1351,7 @@ func (call *builtinUnixTimestamp) compile(c *compiler) (ctype, error) { return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil } if lit, ok := call.Arguments[0].(*Literal); ok { - if dt := evalToDateTime(lit.inner, -1, time.Now()); dt != nil { + if dt := evalToDateTime(lit.inner, -1, time.Now(), c.sqlmode.AllowZeroDate()); dt != nil { if dt.prec == 0 { return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil } @@ -1373,7 +1373,7 @@ func (b *builtinWeek) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || d.isZero() { return nil, nil } @@ -1406,7 +1406,7 @@ func (call *builtinWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } if len(call.Arguments) == 1 { @@ -1433,7 +1433,7 @@ func (b *builtinWeekDay) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || 
d.isZero() { return nil, nil } @@ -1451,7 +1451,7 @@ func (call *builtinWeekDay) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_WEEKDAY() @@ -1467,7 +1467,7 @@ func (b *builtinWeekOfYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || d.isZero() { return nil, nil } @@ -1487,7 +1487,7 @@ func (call *builtinWeekOfYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_WEEKOFYEAR() @@ -1503,7 +1503,7 @@ func (b *builtinYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1522,7 +1522,7 @@ func (call *builtinYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_YEAR() @@ -1539,7 +1539,7 @@ func (b *builtinYearWeek) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil || d.isZero() { return nil, nil } @@ -1572,7 +1572,7 @@ func (call *builtinYearWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } if len(call.Arguments) == 1 { @@ -1624,7 +1624,7 @@ func (call *builtinDateMath) eval(env *ExpressionEnv) (eval, error) { return tmp.addInterval(interval, collations.Unknown, env.now), nil } - if tmp := evalToTemporal(date); tmp != nil { + if tmp := evalToTemporal(date, env.sqlmode.AllowZeroDate()); tmp != nil { return 
tmp.addInterval(interval, call.collate, env.now), nil } diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index 649dc7b5583..44c02b2a5a5 100644 --- a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/format" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" @@ -215,6 +216,10 @@ func (vc *vcursor) TimeZone() *time.Location { return time.Local } +func (vc *vcursor) SQLMode() string { + return config.DefaultSQLMode +} + func initTimezoneData(t *testing.T, conn *mysql.Conn) { // We load the timezone information into MySQL. The evalengine assumes // our backend MySQL is configured with the timezone information as well diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go index 7993854db36..ea38f116de2 100644 --- a/go/vt/vtgate/evalengine/translate.go +++ b/go/vt/vtgate/evalengine/translate.go @@ -569,6 +569,7 @@ type Config struct { Collation collations.ID NoConstantFolding bool NoCompilation bool + SQLMode SQLMode } func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { @@ -603,7 +604,7 @@ func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { } if len(ast.untyped) == 0 && !cfg.NoCompilation { - comp := compiler{collation: cfg.Collation} + comp := compiler{collation: cfg.Collation, sqlmode: cfg.SQLMode} return comp.compile(expr) } @@ -626,9 +627,9 @@ type typedExpr struct { err error } -func (typed *typedExpr) compile(expr IR, collation collations.ID) (*CompiledExpr, error) { +func (typed *typedExpr) compile(expr IR, collation collations.ID, sqlmode SQLMode) (*CompiledExpr, error) { typed.once.Do(func() { - comp := compiler{collation: collation, dynamicTypes: typed.types} + comp := compiler{collation: collation, 
dynamicTypes: typed.types, sqlmode: sqlmode} typed.compiled, typed.err = comp.compile(expr) }) return typed.compiled, typed.err @@ -695,7 +696,7 @@ func (u *UntypedExpr) Compile(env *ExpressionEnv) (*CompiledExpr, error) { if err != nil { return nil, err } - return typed.compile(u.ir, u.collation) + return typed.compile(u.ir, u.collation, env.sqlmode) } func (u *UntypedExpr) typeof(env *ExpressionEnv) (ctype, error) { diff --git a/go/vt/vtgate/evalengine/weights.go b/go/vt/vtgate/evalengine/weights.go index fa7fa7e11a6..2a9d6c9f93e 100644 --- a/go/vt/vtgate/evalengine/weights.go +++ b/go/vt/vtgate/evalengine/weights.go @@ -41,11 +41,11 @@ import ( // externally communicates with the `WEIGHT_STRING` function, so that we // can also use this to order / sort other types like Float and Decimal // as well. -func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { +func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int, sqlmode SQLMode) ([]byte, bool, error) { // We optimize here for the case where we already have the desired type. // Otherwise, we fall back to the general evalengine conversion logic. 
if v.Type() != coerceTo { - return fallbackWeightString(dst, v, coerceTo, col, length, precision) + return fallbackWeightString(dst, v, coerceTo, col, length, precision, sqlmode) } switch { @@ -117,12 +117,12 @@ func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col coll } return j.WeightString(dst), false, nil default: - return fallbackWeightString(dst, v, coerceTo, col, length, precision) + return fallbackWeightString(dst, v, coerceTo, col, length, precision, sqlmode) } } -func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { - e, err := valueToEvalCast(v, coerceTo, col) +func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int, sqlmode SQLMode) ([]byte, bool, error) { + e, err := valueToEvalCast(v, coerceTo, col, sqlmode) if err != nil { return dst, false, err } diff --git a/go/vt/vtgate/evalengine/weights_test.go b/go/vt/vtgate/evalengine/weights_test.go index 0dee4c72d03..7e43315f7df 100644 --- a/go/vt/vtgate/evalengine/weights_test.go +++ b/go/vt/vtgate/evalengine/weights_test.go @@ -136,7 +136,7 @@ func TestWeightStrings(t *testing.T) { items := make([]item, 0, Length) for i := 0; i < Length; i++ { v := tc.gen() - w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec) + w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec, 0) require.NoError(t, err) items = append(items, item{value: v, weight: string(w)}) @@ -156,9 +156,9 @@ func TestWeightStrings(t *testing.T) { a := items[i] b := items[i+1] - v1, err := valueToEvalCast(a.value, typ, tc.col) + v1, err := valueToEvalCast(a.value, typ, tc.col, 0) require.NoError(t, err) - v2, err := valueToEvalCast(b.value, typ, tc.col) + v2, err := valueToEvalCast(b.value, typ, tc.col, 0) require.NoError(t, err) cmp, err := evalCompareNullSafe(v1, v2) diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index 
9ec4bb0dc03..db678d56354 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -27,10 +27,9 @@ import ( "github.com/google/uuid" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sysvars" - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/discovery" @@ -44,6 +43,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/topo" topoprotopb "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -211,6 +211,12 @@ func (vc *vcursorImpl) TimeZone() *time.Location { return vc.safeSession.TimeZone() } +func (vc *vcursorImpl) SQLMode() string { + // TODO: Implement return the current sql_mode. + // This is currently hardcoded to the default in MySQL 8.0. + return config.DefaultSQLMode +} + // MaxMemoryRows returns the maxMemoryRows flag value. 
func (vc *vcursorImpl) MaxMemoryRows() int { return maxMemoryRows diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 576ce4c22a8..ee1a1dbc06c 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -29,6 +29,7 @@ import ( "time" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/test/utils" @@ -225,6 +226,15 @@ func execStatements(t *testing.T, queries []string) { } } +func execConnStatements(t *testing.T, conn *dbconnpool.DBConnection, queries []string) { + t.Helper() + for _, query := range queries { + if _, err := conn.ExecuteFetch(query, 10000, false); err != nil { + t.Fatalf("ExecuteFetch(%v) failed: %v", query, err) + } + } +} + //-------------------------------------- // Topos and tablets diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index b960635ff11..0e35036321f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -1676,22 +1676,26 @@ func TestCopyTablesWithInvalidDates(t *testing.T) { func testCopyTablesWithInvalidDates(t *testing.T) { defer deleteTablet(addTablet(100)) - execStatements(t, []string{ - "create table src1(id int, dt date, primary key(id))", - fmt.Sprintf("create table %s.dst1(id int, dt date, primary key(id))", vrepldb), - "insert into src1 values(1, '2020-01-12'), (2, '0000-00-00');", - }) + conn, err := env.Mysqld.GetDbaConnection(context.Background()) + require.NoError(t, err) // default mysql flavor allows invalid dates: so disallow explicitly for this test - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "SET @@global.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', 
'')"); err != nil { + if _, err := conn.ExecuteFetch("SET @@session.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')", 0, false); err != nil { fmt.Fprintf(os.Stderr, "%v", err) } defer func() { - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "SET @@global.sql_mode=REPLACE(@@global.sql_mode, ',NO_ZERO_DATE,NO_ZERO_IN_DATE','')"); err != nil { + if _, err := conn.ExecuteFetch("SET @@session.sql_mode=REPLACE(@@session.sql_mode, ',NO_ZERO_DATE,NO_ZERO_IN_DATE','')", 0, false); err != nil { fmt.Fprintf(os.Stderr, "%v", err) } }() - defer execStatements(t, []string{ + + execConnStatements(t, conn, []string{ + "create table src1(id int, dt date, primary key(id))", + fmt.Sprintf("create table %s.dst1(id int, dt date, primary key(id))", vrepldb), + "insert into src1 values(1, '2020-01-12'), (2, '0000-00-00');", + }) + + defer execConnStatements(t, conn, []string{ "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) From 4ddce5f63d7c35a2ed5b90abbe5b660a2c54750d Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Mon, 27 Nov 2023 20:49:59 +0530 Subject: [PATCH 07/33] Add summary changes for recent PRs (#14598) --- changelog/19.0/19.0.0/summary.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/changelog/19.0/19.0.0/summary.md b/changelog/19.0/19.0.0/summary.md index be560d50f58..5580bc6e6f5 100644 --- a/changelog/19.0/19.0.0/summary.md +++ b/changelog/19.0/19.0.0/summary.md @@ -7,6 +7,8 @@ - [VTTablet Flags](#vttablet-flags) - **[Docker](#docker)** - [New MySQL Image](#mysql-image) + - **[VTGate](#vtgate)** + - [`FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable](#fk-checks-vitess-aware) - **[Query Compatibility](#query-compatibility)** - [`SHOW VSCHEMA KEYSPACES` Query](#show-vschema-keyspaces) @@ -31,6 +33,12 @@ This lightweight image is a replacement of `vitess/lite` to only run `mysqld`. 
Several tags are available to let you choose what version of MySQL you want to use: `vitess/mysql:8.0.30`, `vitess/mysql:8.0.34`. +### VTGate + +#### `FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable + +When VTGate receives a query to change the `FOREIGN_KEY_CHECKS` value for a session, instead of sending the value down to MySQL, VTGate now keeps track of the value and changes the queries by adding `SET_VAR(FOREIGN_KEY_CHECKS=On/Off)` style query optimizer hints wherever required. + ### Query Compatibility #### `SHOW VSCHEMA KEYSPACES` Query @@ -42,11 +50,11 @@ error in the VSchema for the keyspace. An example output of the query looks like - ```sql mysql> show vschema keyspaces; -+---------------+---------+------------------+-------+ -| Keyspace Name | Sharded | Foreign Key Mode | Error | -+---------------+---------+------------------+-------+ -| uks | false | managed | | -| ks | true | managed | | -+---------------+---------+------------------+-------+ -2 rows in set (0.00 sec) ++----------+---------+-------------+---------+ +| Keyspace | Sharded | Foreign Key | Comment | ++----------+---------+-------------+---------+ +| ks | true | managed | | +| uks | false | managed | | ++----------+---------+-------------+---------+ +2 rows in set (0.01 sec) ``` From 61a0f02d1be387f44fd3913316ada5cfd9924973 Mon Sep 17 00:00:00 2001 From: Pedro Kaj Kjellerup Nacht Date: Mon, 27 Nov 2023 12:39:34 -0300 Subject: [PATCH 08/33] Set minimal tokens for auto_approve_pr (#14534) Signed-off-by: Pedro Kaj Kjellerup Nacht --- .github/workflows/auto_approve_pr.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/auto_approve_pr.yml b/.github/workflows/auto_approve_pr.yml index 552f1ec2e68..115e648c3d2 100644 --- a/.github/workflows/auto_approve_pr.yml +++ b/.github/workflows/auto_approve_pr.yml @@ -3,14 +3,20 @@ on: pull_request: types: [opened, reopened] +permissions: + contents: read + jobs: auto_approve: name: Auto Approve Pull Request 
runs-on: ubuntu-latest + + permissions: + pull-requests: write # only given on local PRs, forks run with `read` access + steps: - name: Checkout code uses: actions/checkout@v3 - - name: Auto Approve Pull Request env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 858f4f8da4dfe1496e504a085ae2e77215a866c4 Mon Sep 17 00:00:00 2001 From: Florent Poinsard <35779988+frouioui@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:19:07 -0600 Subject: [PATCH 09/33] Support unlimited number of ORs in `ExtractINFromOR` (#14566) Signed-off-by: Florent Poinsard --- go/vt/sqlparser/predicate_rewriting.go | 140 ++++++++++++++++---- go/vt/sqlparser/predicate_rewriting_test.go | 35 +++-- 2 files changed, 133 insertions(+), 42 deletions(-) diff --git a/go/vt/sqlparser/predicate_rewriting.go b/go/vt/sqlparser/predicate_rewriting.go index 7bad1b3b82f..234a2f4acd5 100644 --- a/go/vt/sqlparser/predicate_rewriting.go +++ b/go/vt/sqlparser/predicate_rewriting.go @@ -16,6 +16,8 @@ limitations under the License. package sqlparser +import "slices" + // RewritePredicate walks the input AST and rewrites any boolean logic into a simpler form // This simpler form is CNF plus logic for extracting predicates from OR, plus logic for turning ORs into IN func RewritePredicate(ast SQLNode) SQLNode { @@ -204,36 +206,128 @@ func simplifyAnd(expr *AndExpr) (Expr, bool) { return expr, false } -// ExtractINFromOR will add additional predicated to an OR. -// this rewriter should not be used in a fixed point way, since it returns the original expression with additions, -// and it will therefor OOM before it stops rewriting +// ExtractINFromOR rewrites the OR expression into an IN clause. +// Each side of each ORs has to be an equality comparison expression and the column names have to +// match for all sides of each comparison. 
+// This rewriter takes a query that looks like this WHERE a = 1 and b = 11 or a = 2 and b = 12 or a = 3 and b = 13 +// And rewrite that to WHERE (a, b) IN ((1,11), (2,12), (3,13)) func ExtractINFromOR(expr *OrExpr) []Expr { - // we check if we have two comparisons on either side of the OR - // that we can add as an ANDed comparison. - // WHERE (a = 5 and B) or (a = 6 AND C) => - // WHERE (a = 5 AND B) OR (a = 6 AND C) AND a IN (5,6) - // This rewrite makes it possible to find a better route than Scatter if the `a` column has a helpful vindex - lftPredicates := SplitAndExpression(nil, expr.Left) - rgtPredicates := SplitAndExpression(nil, expr.Right) - var ins []Expr - for _, lft := range lftPredicates { - l, ok := lft.(*ComparisonExpr) - if !ok { - continue + var varNames []*ColName + var values []Exprs + orSlice := orToSlice(expr) + for _, expr := range orSlice { + andSlice := andToSlice(expr) + if len(andSlice) == 0 { + return nil } - for _, rgt := range rgtPredicates { - r, ok := rgt.(*ComparisonExpr) - if !ok { - continue + + var currentVarNames []*ColName + var currentValues []Expr + for _, comparisonExpr := range andSlice { + if comparisonExpr.Operator != EqualOp { + return nil } - in, changed := tryTurningOrIntoIn(l, r) - if changed { - ins = append(ins, in) + + var colName *ColName + if left, ok := comparisonExpr.Left.(*ColName); ok { + colName = left + currentValues = append(currentValues, comparisonExpr.Right) + } + + if right, ok := comparisonExpr.Right.(*ColName); ok { + if colName != nil { + return nil + } + colName = right + currentValues = append(currentValues, comparisonExpr.Left) + } + + if colName == nil { + return nil } + + currentVarNames = append(currentVarNames, colName) + } + + if len(varNames) == 0 { + varNames = currentVarNames + } else if !slices.EqualFunc(varNames, currentVarNames, func(col1, col2 *ColName) bool { return col1.Equal(col2) }) { + return nil } + + values = append(values, currentValues) + } + + var nameTuple ValTuple + for _, 
name := range varNames { + nameTuple = append(nameTuple, name) + } + + var valueTuple ValTuple + for _, value := range values { + valueTuple = append(valueTuple, ValTuple(value)) + } + + return []Expr{&ComparisonExpr{ + Operator: InOp, + Left: nameTuple, + Right: valueTuple, + }} +} + +func orToSlice(expr *OrExpr) []Expr { + var exprs []Expr + + handleOrSide := func(e Expr) { + switch e := e.(type) { + case *OrExpr: + exprs = append(exprs, orToSlice(e)...) + default: + exprs = append(exprs, e) + } + } + + handleOrSide(expr.Left) + handleOrSide(expr.Right) + return exprs +} + +func andToSlice(expr Expr) []*ComparisonExpr { + var andExpr *AndExpr + switch expr := expr.(type) { + case *AndExpr: + andExpr = expr + case *ComparisonExpr: + return []*ComparisonExpr{expr} + default: + return nil + } + + var exprs []*ComparisonExpr + handleAndSide := func(e Expr) bool { + switch e := e.(type) { + case *AndExpr: + slice := andToSlice(e) + if slice == nil { + return false + } + exprs = append(exprs, slice...) 
+ case *ComparisonExpr: + exprs = append(exprs, e) + default: + return false + } + return true + } + + if !handleAndSide(andExpr.Left) { + return nil + } + if !handleAndSide(andExpr.Right) { + return nil } - return uniquefy(ins) + return exprs } func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, bool) { diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go index e106a56f1aa..a4bbb5f7b5c 100644 --- a/go/vt/sqlparser/predicate_rewriting_test.go +++ b/go/vt/sqlparser/predicate_rewriting_test.go @@ -140,6 +140,18 @@ func TestRewritePredicate(in *testing.T) { // the following two tests show some pathological cases that would grow too much, and so we abort the rewriting in: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", expected: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", + }, { + in: "a = 5 and B or a = 6 and C", + expected: "a in (5, 6) and (a = 5 or C) and ((B or a = 6) and (B or C))", + }, { + in: "(a = 5 and b = 1 or b = 2 and a = 6)", + expected: "(a = 5 or b = 2) and a in (5, 6) and (b in (1, 2) and (b = 1 or a = 6))", + }, { + in: "(a in (1,5) and B or C and a = 6)", + expected: "(a in (1, 5) or C) and a in (1, 5, 6) and ((B or C) and (B or a = 6))", + }, { + in: "(a in (1, 5) and B or C and a in (5, 7))", + expected: "(a in (1, 5) or C) and a in (1, 5, 7) and ((B or C) and (B or a in (5, 7)))", }, { in: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", expected: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", @@ -161,26 +173,11 @@ func TestExtractINFromOR(in *testing.T) { in string expected string }{{ - in: "(A and B) or (B and A)", - expected: "", - }, { - in: "(a = 5 and B) or A", - expected: "", - }, { - in: "a = 5 and B or a = 6 and C", - expected: "a in (5, 6)", - }, { - in: "(a = 5 and b = 1 or 
b = 2 and a = 6)", - expected: "a in (5, 6) and b in (1, 2)", - }, { - in: "(a in (1,5) and B or C and a = 6)", - expected: "a in (1, 5, 6)", - }, { - in: "(a in (1, 5) and B or C and a in (5, 7))", - expected: "a in (1, 5, 7)", + in: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", + expected: "(a, b) in ((1, 41), (2, 42), (3, 43), (4, 44), (5, 45), (6, 46))", }, { - in: "(a = 5 and b = 1 or b = 2 and a = 6 or b = 3 and a = 4)", - expected: "", + in: "a = 1 or a = 2 or a = 3 or a = 4 or a = 5 or a = 6", + expected: "(a) in ((1), (2), (3), (4), (5), (6))", }} for _, tc := range tests { From 162f3465a9bf4e3884ad15cac999416165ecc452 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Mon, 27 Nov 2023 16:02:39 -0500 Subject: [PATCH 10/33] remove deprecated flags from the codebase (#14544) Signed-off-by: Andrew Mason --- go/cmd/vtctldclient/command/schema.go | 2 -- go/flags/endtoend/vtbench.txt | 6 +++--- go/flags/endtoend/vtclient.txt | 6 +++--- .../onlineddl/revert/onlineddl_revert_test.go | 8 -------- .../tabletgateway/buffer/buffer_test_helpers.go | 1 - go/vt/vtctl/vtctl.go | 1 - go/vt/vtgate/tabletgateway.go | 3 --- go/vt/vtgate/vtgate.go | 10 +--------- go/vt/vttablet/tabletserver/tabletenv/config.go | 6 ------ go/vt/vttablet/tabletserver/throttle/throttler.go | 12 ------------ 10 files changed, 7 insertions(+), 48 deletions(-) diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index 795b1315e89..2d31e3500c1 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ b/go/cmd/vtctldclient/command/schema.go @@ -286,8 +286,6 @@ func commandReloadSchemaShard(cmd *cobra.Command, args []string) error { } func init() { - ApplySchema.Flags().Bool("allow-long-unavailability", false, "Deprecated and has no effect.") - ApplySchema.Flags().MarkDeprecated("--allow-long-unavailability", "") ApplySchema.Flags().StringVar(&applySchemaOptions.DDLStrategy, "ddl-strategy", 
string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'.") ApplySchema.Flags().StringSliceVar(&applySchemaOptions.UUIDList, "uuid", nil, "Optional, comma-delimited, repeatable, explicit UUIDs for migration. If given, must match number of DDL changes.") ApplySchema.Flags().StringVar(&applySchemaOptions.MigrationContext, "migration-context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess.") diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt index d74dc13ebc8..22066778fe2 100644 --- a/go/flags/endtoend/vtbench.txt +++ b/go/flags/endtoend/vtbench.txt @@ -64,7 +64,7 @@ Flags: --host string VTGate host(s) in the form 'host1,host2,...' --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -78,7 +78,7 @@ Flags: --sql string SQL statement to execute --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --tablet_grpc_ca string the server ca to use to validate servers when connecting 
--tablet_grpc_cert string the cert to use to connect --tablet_grpc_crl string the server crl to use to validate server certificates when connecting @@ -89,7 +89,7 @@ Flags: --user string Username to connect using mysql (password comes from the db-credentials-file) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vtgate_grpc_ca string the server ca to use to validate servers when connecting --vtgate_grpc_cert string the cert to use to connect --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt index 3d17734168c..e82b2807603 100644 --- a/go/flags/endtoend/vtclient.txt +++ b/go/flags/endtoend/vtclient.txt @@ -28,7 +28,7 @@ Flags: --json Output JSON instead of human-readable table --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -42,11 +42,11 @@ Flags: --qps int queries per second to throttle each thread at. 
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string vtgate server to connect to - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --streaming use a streaming query --target string keyspace:shard@tablet_type --timeout duration timeout for queries (default 30s) --use_random_sequence use random sequence for generating [min_sequence_id, max_sequence_id) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index 3220465269e..9d8322f07b9 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -157,14 +157,6 @@ func TestMain(m *testing.M) { "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", "--watch_replication_stream", - // The next flags are deprecated, and we incldue them to verify that they are nonetheless still allowed. - // The values are irrelevant. Just the fact that the flags are allowed in what's important. - // These should be included in v18, and removed in v19. 
- "--throttle_threshold", "1m", - "--throttle_metrics_query", "select 1 from dual", - "--throttle_metrics_threshold", "1.5", - "--throttle_check_as_check_self=false", - "--throttler-config-via-topo=true", } clusterInstance.VtGateExtraArgs = []string{ "--ddl_strategy", "online", diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index 8a3dd4f9b73..5ba71f7ab6e 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -241,7 +241,6 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { "--buffer_window", "10m", "--buffer_max_failover_duration", "10m", "--buffer_min_time_between_failovers", "20m", - "--buffer_implementation", "keyspace_events", "--tablet_refresh_interval", "1s", } clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, bt.VtGateExtraArgs...) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 90e5d0d8ddd..c340485b597 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -2900,7 +2900,6 @@ func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, s } func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - subFlags.MarkDeprecated("allow_long_unavailability", "") sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") ddlStrategy := subFlags.String("ddl_strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index 1be29459bf6..7703f47c4fa 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ 
-49,7 +49,6 @@ var ( // CellsToWatch is the list of cells the healthcheck operates over. If it is empty, only the local cell is watched CellsToWatch string - bufferImplementation = "keyspace_events" initialTabletTimeout = 30 * time.Second // retryCount is the number of times a query will be retried on error retryCount = 2 @@ -58,8 +57,6 @@ var ( func init() { servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { fs.StringVar(&CellsToWatch, "cells_to_watch", "", "comma-separated list of cells for watching tablets") - fs.StringVar(&bufferImplementation, "buffer_implementation", "keyspace_events", "Allowed values: healthcheck (legacy implementation), keyspace_events (default)") - fs.MarkDeprecated("buffer_implementation", "The 'healthcheck' buffer implementation has been removed in v18 and this option will be removed in v19") fs.DurationVar(&initialTabletTimeout, "gateway_initial_tablet_timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") fs.IntVar(&retryCount, "retry-count", 2, "retry count") }) diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index b66ea93226e..a5ace194c2f 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -151,16 +151,8 @@ func registerFlags(fs *pflag.FlagSet) { fs.IntVar(&warmingReadsPercent, "warming-reads-percent", 0, "Percentage of reads on the primary to forward to replicas. 
Useful for keeping buffer pools warm") fs.IntVar(&warmingReadsConcurrency, "warming-reads-concurrency", 500, "Number of concurrent warming reads allowed") fs.DurationVar(&warmingReadsQueryTimeout, "warming-reads-query-timeout", 5*time.Second, "Timeout of warming read queries") - - _ = fs.String("schema_change_signal_user", "", "User to be used to send down query to vttablet to retrieve schema changes") - _ = fs.MarkDeprecated("schema_change_signal_user", "schema tracking uses an internal api and does not require a user to be specified") - - fs.Int64("gate_query_cache_size", 0, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") - _ = fs.MarkDeprecated("gate_query_cache_size", "`--gate_query_cache_size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") - - fs.Bool("gate_query_cache_lfu", false, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") - _ = fs.MarkDeprecated("gate_query_cache_lfu", "`--gate_query_cache_lfu` is deprecated and will be removed in `v19.0`. 
The query cache always uses a LFU implementation now.") } + func init() { servenv.OnParseFor("vtgate", registerFlags) servenv.OnParseFor("vtcombo", registerFlags) diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index d490c97326a..ac2629709b9 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -132,14 +132,8 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.IntVar(¤tConfig.StreamBufferSize, "queryserver-config-stream-buffer-size", defaultConfig.StreamBufferSize, "query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size.") - fs.Int("queryserver-config-query-cache-size", 0, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - _ = fs.MarkDeprecated("queryserver-config-query-cache-size", "`--queryserver-config-query-cache-size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") - fs.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - fs.Bool("queryserver-config-query-cache-lfu", false, "query server cache algorithm. 
when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") - _ = fs.MarkDeprecated("queryserver-config-query-cache-lfu", "`--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19.0`. The query cache always uses a LFU implementation now.") - currentConfig.SchemaReloadIntervalSeconds = defaultConfig.SchemaReloadIntervalSeconds.Clone() fs.Var(¤tConfig.SchemaReloadIntervalSeconds, currentConfig.SchemaReloadIntervalSeconds.Name(), "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.") fs.DurationVar(¤tConfig.SchemaChangeReloadTimeout, "schema-change-reload-timeout", defaultConfig.SchemaChangeReloadTimeout, "query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up") diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 21f9b068d0e..1e03a1dec73 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -82,18 +82,6 @@ func init() { func registerThrottlerFlags(fs *pflag.FlagSet) { fs.StringVar(&throttleTabletTypes, "throttle_tablet_types", throttleTabletTypes, "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included") - - fs.Duration("throttle_threshold", 0, "Replication lag threshold for default lag throttling") - fs.String("throttle_metrics_query", "", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. 
Set -throttle_metrics_threshold respectively.") - fs.Float64("throttle_metrics_threshold", 0, "Override default throttle threshold, respective to --throttle_metrics_query") - fs.Bool("throttle_check_as_check_self", false, "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") - fs.Bool("throttler-config-via-topo", false, "Deprecated, will be removed in v19. Assumed to be 'true'") - - fs.MarkDeprecated("throttle_threshold", "Replication lag threshold for default lag throttling") - fs.MarkDeprecated("throttle_metrics_query", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.") - fs.MarkDeprecated("throttle_metrics_threshold", "Override default throttle threshold, respective to --throttle_metrics_query") - fs.MarkDeprecated("throttle_check_as_check_self", "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") - fs.MarkDeprecated("throttler-config-via-topo", "Assumed to be 'true'") } var ( From 2464ab560e1a199920e6f9881782fd869b150f71 Mon Sep 17 00:00:00 2001 From: Deepthi Sigireddi Date: Mon, 27 Nov 2023 13:15:29 -0800 Subject: [PATCH 11/33] logging: log time taken for tablet initialization only once (#14597) Signed-off-by: deepthi --- go/vt/vttablet/tabletserver/state_manager.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 7203174449e..75d1a7c7c3b 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -64,6 +64,7 @@ func (state servingState) String() string { // transitionRetryInterval is for tests. var transitionRetryInterval = 1 * time.Second +var logInitTime sync.Once // stateManager manages state transition for all the TabletServer // subcomponents. 
@@ -611,9 +612,9 @@ func (sm *stateManager) setTimeBomb() chan struct{} { // setState changes the state and logs the event. func (sm *stateManager) setState(tabletType topodatapb.TabletType, state servingState) { - defer func() { + defer logInitTime.Do(func() { log.Infof("Tablet Init took %d ms", time.Since(servenv.GetInitStartTime()).Milliseconds()) - }() + }) sm.mu.Lock() defer sm.mu.Unlock() if tabletType == topodatapb.TabletType_UNKNOWN { From f9799616fdeaf58d57d97c9ce783b2184424ad83 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:40:57 +0530 Subject: [PATCH 12/33] Make vttablet wait for vt_dba user to be granted privileges (#14565) Signed-off-by: Manan Gupta --- go/cmd/vttablet/cli/cli.go | 9 + go/vt/vttablet/tabletserver/tabletserver.go | 34 +++- .../tabletserver/tabletserver_test.go | 154 ++++++++++++++++++ 3 files changed, 195 insertions(+), 2 deletions(-) diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index 1efa35613d7..2f8b0e5bab3 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -102,6 +102,10 @@ vttablet \ } ) +const ( + dbaGrantWaitTime = 10 * time.Second +) + func run(cmd *cobra.Command, args []string) error { servenv.Init() @@ -244,6 +248,11 @@ func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts } else if enforceTableACLConfig { return nil, fmt.Errorf("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") } + + err := tabletserver.WaitForDBAGrants(config, dbaGrantWaitTime) + if err != nil { + return nil, err + } // creates and registers the query service qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias) servenv.OnRun(func() { diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 6c1a60928de..34b284c9ee6 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ 
b/go/vt/vttablet/tabletserver/tabletserver.go @@ -33,16 +33,16 @@ import ( "syscall" "time" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - - "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" @@ -232,6 +232,36 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC return tsv } +// WaitForDBAGrants waits for DBA user to have the required privileges to function properly. +func WaitForDBAGrants(config *tabletenv.TabletConfig, waitTime time.Duration) error { + if waitTime == 0 { + return nil + } + timer := time.NewTimer(waitTime) + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + defer cancel() + for { + conn, err := dbconnpool.NewDBConnection(ctx, config.DB.DbaConnector()) + if err == nil { + res, fetchErr := conn.ExecuteFetch("SHOW GRANTS", 1000, false) + if fetchErr == nil && res != nil && len(res.Rows) > 0 && len(res.Rows[0]) > 0 { + privileges := res.Rows[0][0].ToString() + // In MySQL 8.0, all the privileges are listed out explicitly, so we can search for SUPER in the output. + // In MySQL 5.7, all the privileges are not listed explicitly, instead ALL PRIVILEGES is written, so we search for that too. 
+ if strings.Contains(privileges, "SUPER") || strings.Contains(privileges, "ALL PRIVILEGES") { + return nil + } + } + } + select { + case <-timer.C: + return fmt.Errorf("waited %v for dba user to have the required permissions", waitTime) + default: + time.Sleep(100 * time.Millisecond) + } + } +} + func (tsv *TabletServer) loadQueryTimeout() time.Duration { return time.Duration(tsv.QueryTimeout.Load()) } diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index d2fb10e5a77..71ad86bf854 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -31,7 +31,10 @@ import ( "time" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/dbconfigs" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/vttest" "vitess.io/vitess/go/vt/callerid" @@ -2626,3 +2629,154 @@ func addTabletServerSupportedQueries(db *fakesqldb.DB) { }}, }) } + +func TestWaitForDBAGrants(t *testing.T) { + tests := []struct { + name string + waitTime time.Duration + errWanted string + setupFunc func(t *testing.T) (*tabletenv.TabletConfig, func()) + }{ + { + name: "Success without any wait", + waitTime: 1 * time.Second, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql instance, and the dba user with required grants. + // Since all the grants already exist, this should pass without any waiting to be needed. 
+ testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + grantAllPrivilegesToUser(t, cluster.MySQLConnParams(), testUser) + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, + { + name: "Success with wait", + waitTime: 1 * time.Second, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql instance, but delay granting the privileges to the dba user. + // This makes the waitForDBAGrants function retry the grant check. + testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + go func() { + time.Sleep(500 * time.Millisecond) + grantAllPrivilegesToUser(t, cluster.MySQLConnParams(), testUser) + }() + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, { + name: "Failure due to timeout", + waitTime: 300 * time.Millisecond, + errWanted: "waited 300ms for dba user to have the required permissions", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql but don't give the grants to the vt_dba user at all. + // This should cause a timeout after waiting, since the privileges are never granted. 
+ testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, { + name: "Empty timeout", + waitTime: 0, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + return nil, func() {} + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config, cleanup := tt.setupFunc(t) + defer cleanup() + err := WaitForDBAGrants(config, tt.waitTime) + if tt.errWanted == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, tt.errWanted) + } + }) + } +} + +// startMySQLAndCreateUser starts a MySQL instance and creates the given user +func startMySQLAndCreateUser(t *testing.T, testUser string) (vttest.LocalCluster, error) { + // Launch MySQL. + // We need a Keyspace in the topology, so the DbName is set. + // We need a Shard too, so the database 'vttest' is created. + cfg := vttest.Config{ + Topology: &vttestpb.VTTestTopology{ + Keyspaces: []*vttestpb.Keyspace{ + { + Name: "vttest", + Shards: []*vttestpb.Shard{ + { + Name: "0", + DbNameOverride: "vttest", + }, + }, + }, + }, + }, + OnlyMySQL: true, + Charset: "utf8mb4", + } + cluster := vttest.LocalCluster{ + Config: cfg, + } + err := cluster.Setup() + if err != nil { + return cluster, nil + } + + connParams := cluster.MySQLConnParams() + conn, err := mysql.Connect(context.Background(), &connParams) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`CREATE USER '%v'@'localhost';`, testUser), 1000, false) + conn.Close() + + return cluster, err +} + +// grantAllPrivilegesToUser grants all the privileges to the user specified. 
+func grantAllPrivilegesToUser(t *testing.T, connParams mysql.ConnParams, testUser string) { + conn, err := mysql.Connect(context.Background(), &connParams) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`GRANT ALL ON *.* TO '%v'@'localhost';`, testUser), 1000, false) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`GRANT GRANT OPTION ON *.* TO '%v'@'localhost';`, testUser), 1000, false) + require.NoError(t, err) + _, err = conn.ExecuteFetch("FLUSH PRIVILEGES;", 1000, false) + require.NoError(t, err) + conn.Close() +} From 5d4ffd00393e706466ada20e7ba7e4966a85533b Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Tue, 28 Nov 2023 21:28:39 +0100 Subject: [PATCH 13/33] tabletserver: Skip wait for DBA grants for external tablets (#14629) Signed-off-by: Dirkjan Bussink --- go/vt/vttablet/tabletserver/tabletserver.go | 4 ++- .../tabletserver/tabletserver_test.go | 28 ++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 34b284c9ee6..63f06b125d1 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -234,7 +234,9 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC // WaitForDBAGrants waits for DBA user to have the required privileges to function properly. func WaitForDBAGrants(config *tabletenv.TabletConfig, waitTime time.Duration) error { - if waitTime == 0 { + // We don't wait for grants if the tablet is externally managed. Permissions + // are then the responsibility of the DBA. 
+ if config.DB.HasGlobalSettings() || waitTime == 0 { return nil } timer := time.NewTimer(waitTime) diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index 71ad86bf854..0f85e1018f5 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -2706,12 +2706,38 @@ func TestWaitForDBAGrants(t *testing.T) { cluster.TearDown() } }, + }, { + name: "Success for externally managed tablet", + waitTime: 300 * time.Millisecond, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql but don't give the grants to the vt_dba user at all. + // This should cause a timeout after waiting, since the privileges are never granted. + testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{ + Host: "some.unknown.host", + }, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, }, { name: "Empty timeout", waitTime: 0, errWanted: "", setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { - return nil, func() {} + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + return tc, func() {} }, }, } From 5d49bf998dfd7d8dca737647bd169dac46ac0d15 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:20:27 +0200 Subject: [PATCH 14/33] Materializer: normalize schema via schemadiff on --atomic-copy (#14636) --- go/test/endtoend/vreplication/fk_test.go | 2 +- go/vt/vtctl/workflow/materializer.go | 16 ++++++++++++++++ go/vt/wrangler/materializer.go | 16 ++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/go/test/endtoend/vreplication/fk_test.go 
b/go/test/endtoend/vreplication/fk_test.go index 06e94e0222d..4798edfb975 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -88,7 +88,7 @@ func TestFKWorkflow(t *testing.T) { } targetKeyspace := "fktarget" targetTabletId := 200 - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, initialFKSchema, 0, 0, targetTabletId, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, sourceKsOpts) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) workflowName := "fk" diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go index 6be5ac7f445..52196661eb5 100644 --- a/go/vt/vtctl/workflow/materializer.go +++ b/go/vt/vtctl/workflow/materializer.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/schematools" @@ -444,6 +445,21 @@ func (mz *materializer) deploySchema() error { } if len(applyDDLs) > 0 { + if mz.ms.AtomicCopy { + // AtomicCopy suggests we may be interested in Foreign Key support. As such, we want to + // normalize the source schema: ensure the order of table definitions is compatible with + // the constraints graph. We want to first create the parents, then the children. + // We use schemadiff to normalize the schema. + // For now, and because this is could have wider implications, we ignore any errors in + // reading the source schema. 
+ schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + if err != nil { + log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + } else { + applyDDLs = schema.ToQueries() + log.Infof("AtomicCopy used, and schema was normalized via schemadiff. %v queries normalized", len(applyDDLs)) + } + } sql := strings.Join(applyDDLs, ";\n") _, err = mz.tmc.ApplySchema(mz.ctx, targetTablet.Tablet, &tmutils.SchemaChange{ diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 990492bd191..01a3a640fb8 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -39,6 +39,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -1260,6 +1261,21 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if len(applyDDLs) > 0 { + if mz.ms.AtomicCopy { + // AtomicCopy suggests we may be interested in Foreign Key support. As such, we want to + // normalize the source schema: ensure the order of table definitions is compatible with + // the constraints graph. We want to first create the parents, then the children. + // We use schemadiff to normalize the schema. + // For now, and because this is could have wider implications, we ignore any errors in + // reading the source schema. + schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + if err != nil { + log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + } else { + applyDDLs = schema.ToQueries() + log.Infof("AtomicCopy used, and schema was normalized via schemadiff. 
%v queries normalized", len(applyDDLs)) + } + } sql := strings.Join(applyDDLs, ";\n") _, err = mz.wr.tmc.ApplySchema(ctx, targetTablet.Tablet, &tmutils.SchemaChange{ From f892be030b0cc0f9b3a65de6cc05bff0a32fa679 Mon Sep 17 00:00:00 2001 From: Florent Poinsard <35779988+frouioui@users.noreply.github.com> Date: Wed, 29 Nov 2023 07:04:21 -0600 Subject: [PATCH 15/33] Fix license header typo (#14630) Signed-off-by: Florent Poinsard --- go/cmd/topo2topo/cli/plugin_consultopo.go | 2 +- go/cmd/vtclient/cli/vtclient_test.go | 2 +- go/cmd/vtctl/plugin_cephbackupstorage.go | 2 +- go/cmd/vtctl/plugin_s3backupstorage.go | 2 +- go/cmd/vtctld/cli/cli.go | 2 +- go/cmd/vtctld/cli/plugin_cephbackupstorage.go | 2 +- go/cmd/vtctld/cli/plugin_consultopo.go | 2 +- go/cmd/vtctld/cli/plugin_s3backupstorage.go | 2 +- go/cmd/vtctld/cli/plugin_zk2topo.go | 2 +- go/cmd/vtctld/cli/schema.go | 2 +- go/cmd/vtctld/main.go | 2 +- go/cmd/vtgate/cli/cli.go | 2 +- go/cmd/vtgate/cli/plugin_auth_ldap.go | 2 +- go/cmd/vtgate/cli/plugin_auth_static.go | 2 +- go/cmd/vtgate/cli/plugin_auth_vault.go | 2 +- go/cmd/vtgate/cli/plugin_consultopo.go | 2 +- go/cmd/vtgate/cli/plugin_zk2topo.go | 2 +- go/cmd/vtgate/cli/status.go | 2 +- go/cmd/vtorc/cli/plugin_consultopo.go | 2 +- go/cmd/vtorc/cli/plugin_zk2topo.go | 2 +- go/cmd/vttablet/cli/cli.go | 2 +- go/cmd/vttablet/cli/plugin_cephbackupstorage.go | 2 +- go/cmd/vttablet/cli/plugin_consultopo.go | 2 +- go/cmd/vttablet/cli/plugin_s3backupstorage.go | 2 +- go/cmd/vttablet/cli/plugin_sysloglogger.go | 2 +- go/cmd/vttablet/cli/plugin_zk2topo.go | 2 +- go/cmd/vttablet/cli/status.go | 2 +- go/cmd/vttlstest/cli/vttlstest.go | 2 +- go/cmd/vttlstest/vttlstest.go | 2 +- go/cmd/zk/command/add_auth.go | 2 +- go/cmd/zk/command/cat.go | 2 +- go/cmd/zk/command/chmod.go | 2 +- go/cmd/zk/command/cp.go | 2 +- go/cmd/zk/command/edit.go | 2 +- go/cmd/zk/command/ls.go | 2 +- go/cmd/zk/command/rm.go | 2 +- go/cmd/zk/command/root.go | 2 +- go/cmd/zk/command/stat.go | 2 +- 
go/cmd/zk/command/touch.go | 2 +- go/cmd/zk/command/unzip.go | 2 +- go/cmd/zk/command/wait.go | 2 +- go/cmd/zk/command/watch.go | 2 +- go/cmd/zk/command/zip.go | 2 +- go/cmd/zk/internal/zkfilepath/zkfilepath.go | 2 +- go/cmd/zk/internal/zkfs/zkfs.go | 2 +- go/cmd/zk/zkcmd.go | 2 +- go/event/event_test.go | 2 +- go/event/hooks_test.go | 2 +- go/event/syslogger/fake_logger.go | 2 +- go/event/syslogger/syslogger_test.go | 2 +- go/exit/exit_test.go | 2 +- go/fileutil/wildcards_test.go | 2 +- go/flagutil/enum.go | 2 +- go/flagutil/flagutil.go | 2 +- go/flagutil/flagutil_test.go | 2 +- go/history/history.go | 2 +- go/history/history_test.go | 2 +- go/vt/discovery/utils_test.go | 2 +- go/vt/topo/topotests/shard_watch_test.go | 2 +- go/vt/vttest/plugin_consultopo.go | 2 +- go/vt/vttest/plugin_zk2topo.go | 2 +- 61 files changed, 61 insertions(+), 61 deletions(-) diff --git a/go/cmd/topo2topo/cli/plugin_consultopo.go b/go/cmd/topo2topo/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/topo2topo/cli/plugin_consultopo.go +++ b/go/cmd/topo2topo/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtclient/cli/vtclient_test.go b/go/cmd/vtclient/cli/vtclient_test.go index a5ee571cd0b..a323ab5c63a 100644 --- a/go/cmd/vtclient/cli/vtclient_test.go +++ b/go/cmd/vtclient/cli/vtclient_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctl/plugin_cephbackupstorage.go b/go/cmd/vtctl/plugin_cephbackupstorage.go index 6cd2d5619d0..0d4710ec982 100644 --- a/go/cmd/vtctl/plugin_cephbackupstorage.go +++ b/go/cmd/vtctl/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctl/plugin_s3backupstorage.go b/go/cmd/vtctl/plugin_s3backupstorage.go index a5b5c671ebb..f1d1a454041 100644 --- a/go/cmd/vtctl/plugin_s3backupstorage.go +++ b/go/cmd/vtctl/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go index e5124133adb..f7fef555896 100644 --- a/go/cmd/vtctld/cli/cli.go +++ b/go/cmd/vtctld/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_cephbackupstorage.go b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go index 171198f5e29..7755e1cae2d 100644 --- a/go/cmd/vtctld/cli/plugin_cephbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_consultopo.go b/go/cmd/vtctld/cli/plugin_consultopo.go index 4617d753953..b8f8f2e8cdc 100644 --- a/go/cmd/vtctld/cli/plugin_consultopo.go +++ b/go/cmd/vtctld/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_s3backupstorage.go b/go/cmd/vtctld/cli/plugin_s3backupstorage.go index 4b3ecb33edb..e09f6060809 100644 --- a/go/cmd/vtctld/cli/plugin_s3backupstorage.go +++ b/go/cmd/vtctld/cli/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_zk2topo.go b/go/cmd/vtctld/cli/plugin_zk2topo.go index 77f86d98d52..f1e5f27ea1b 100644 --- a/go/cmd/vtctld/cli/plugin_zk2topo.go +++ b/go/cmd/vtctld/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/schema.go b/go/cmd/vtctld/cli/schema.go index 480679a09e6..68dc47b2b6f 100644 --- a/go/cmd/vtctld/cli/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go index 6f9ab7384fc..46ce01e409f 100644 --- a/go/cmd/vtctld/main.go +++ b/go/cmd/vtctld/main.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go index 9182bfcf9a4..bcd280890e5 100644 --- a/go/cmd/vtgate/cli/cli.go +++ b/go/cmd/vtgate/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_auth_ldap.go b/go/cmd/vtgate/cli/plugin_auth_ldap.go index 7dc5b246f72..7aab7e9c7f4 100644 --- a/go/cmd/vtgate/cli/plugin_auth_ldap.go +++ b/go/cmd/vtgate/cli/plugin_auth_ldap.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_auth_static.go b/go/cmd/vtgate/cli/plugin_auth_static.go index 9ffd60a79f2..76cdf8318ba 100644 --- a/go/cmd/vtgate/cli/plugin_auth_static.go +++ b/go/cmd/vtgate/cli/plugin_auth_static.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_auth_vault.go b/go/cmd/vtgate/cli/plugin_auth_vault.go index 2aee32e3940..fe5fe2207d4 100644 --- a/go/cmd/vtgate/cli/plugin_auth_vault.go +++ b/go/cmd/vtgate/cli/plugin_auth_vault.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_consultopo.go b/go/cmd/vtgate/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vtgate/cli/plugin_consultopo.go +++ b/go/cmd/vtgate/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_zk2topo.go b/go/cmd/vtgate/cli/plugin_zk2topo.go index 1870a3b2bb3..66d14988c75 100644 --- a/go/cmd/vtgate/cli/plugin_zk2topo.go +++ b/go/cmd/vtgate/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/status.go b/go/cmd/vtgate/cli/status.go index 9652392dc65..d38cac2d50e 100644 --- a/go/cmd/vtgate/cli/status.go +++ b/go/cmd/vtgate/cli/status.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtorc/cli/plugin_consultopo.go b/go/cmd/vtorc/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vtorc/cli/plugin_consultopo.go +++ b/go/cmd/vtorc/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtorc/cli/plugin_zk2topo.go b/go/cmd/vtorc/cli/plugin_zk2topo.go index d71a7e2e196..0b2884cc258 100644 --- a/go/cmd/vtorc/cli/plugin_zk2topo.go +++ b/go/cmd/vtorc/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index 2f8b0e5bab3..bed53d284e8 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_cephbackupstorage.go b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go index 171198f5e29..7755e1cae2d 100644 --- a/go/cmd/vttablet/cli/plugin_cephbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_consultopo.go b/go/cmd/vttablet/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vttablet/cli/plugin_consultopo.go +++ b/go/cmd/vttablet/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_s3backupstorage.go b/go/cmd/vttablet/cli/plugin_s3backupstorage.go index 4b3ecb33edb..e09f6060809 100644 --- a/go/cmd/vttablet/cli/plugin_s3backupstorage.go +++ b/go/cmd/vttablet/cli/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_sysloglogger.go b/go/cmd/vttablet/cli/plugin_sysloglogger.go index a7260d6f8cc..303bf014d83 100644 --- a/go/cmd/vttablet/cli/plugin_sysloglogger.go +++ b/go/cmd/vttablet/cli/plugin_sysloglogger.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_zk2topo.go b/go/cmd/vttablet/cli/plugin_zk2topo.go index d71a7e2e196..0b2884cc258 100644 --- a/go/cmd/vttablet/cli/plugin_zk2topo.go +++ b/go/cmd/vttablet/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/status.go b/go/cmd/vttablet/cli/status.go index 762a9fa646e..de3bfcbce74 100644 --- a/go/cmd/vttablet/cli/status.go +++ b/go/cmd/vttablet/cli/status.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttlstest/cli/vttlstest.go b/go/cmd/vttlstest/cli/vttlstest.go index 4e0f9c2b95e..9791645cdc2 100644 --- a/go/cmd/vttlstest/cli/vttlstest.go +++ b/go/cmd/vttlstest/cli/vttlstest.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttlstest/vttlstest.go b/go/cmd/vttlstest/vttlstest.go index 08e994c096d..8b98687c7a8 100644 --- a/go/cmd/vttlstest/vttlstest.go +++ b/go/cmd/vttlstest/vttlstest.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/add_auth.go b/go/cmd/zk/command/add_auth.go index 566c463f4a8..117ddf1cd8a 100644 --- a/go/cmd/zk/command/add_auth.go +++ b/go/cmd/zk/command/add_auth.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/cat.go b/go/cmd/zk/command/cat.go index 1d5460f7006..6dae3c903a9 100644 --- a/go/cmd/zk/command/cat.go +++ b/go/cmd/zk/command/cat.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/chmod.go b/go/cmd/zk/command/chmod.go index 39125d618c4..63bd103fdb2 100644 --- a/go/cmd/zk/command/chmod.go +++ b/go/cmd/zk/command/chmod.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/cp.go b/go/cmd/zk/command/cp.go index e89486413ea..b45baab1b6e 100644 --- a/go/cmd/zk/command/cp.go +++ b/go/cmd/zk/command/cp.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/edit.go b/go/cmd/zk/command/edit.go index ec4b74c4b62..90348161502 100644 --- a/go/cmd/zk/command/edit.go +++ b/go/cmd/zk/command/edit.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/ls.go b/go/cmd/zk/command/ls.go index 83c1d31363b..5d28f20ae60 100644 --- a/go/cmd/zk/command/ls.go +++ b/go/cmd/zk/command/ls.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/rm.go b/go/cmd/zk/command/rm.go index 5e5b5f4c494..8b710b2fb74 100644 --- a/go/cmd/zk/command/rm.go +++ b/go/cmd/zk/command/rm.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/root.go b/go/cmd/zk/command/root.go index f3f02e7d4f2..2aabcd50e4f 100644 --- a/go/cmd/zk/command/root.go +++ b/go/cmd/zk/command/root.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/stat.go b/go/cmd/zk/command/stat.go index 713a68a3d4e..28aa6bb3465 100644 --- a/go/cmd/zk/command/stat.go +++ b/go/cmd/zk/command/stat.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/touch.go b/go/cmd/zk/command/touch.go index 76c390cf169..53e80a05214 100644 --- a/go/cmd/zk/command/touch.go +++ b/go/cmd/zk/command/touch.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/unzip.go b/go/cmd/zk/command/unzip.go index f4c800e0533..81e04bf4564 100644 --- a/go/cmd/zk/command/unzip.go +++ b/go/cmd/zk/command/unzip.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/wait.go b/go/cmd/zk/command/wait.go index 864f6e83626..8aa844d8bc6 100644 --- a/go/cmd/zk/command/wait.go +++ b/go/cmd/zk/command/wait.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/watch.go b/go/cmd/zk/command/watch.go index eb28cc29ca2..7d6de784718 100644 --- a/go/cmd/zk/command/watch.go +++ b/go/cmd/zk/command/watch.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/zip.go b/go/cmd/zk/command/zip.go index b765f5bb00e..5f06b97c508 100644 --- a/go/cmd/zk/command/zip.go +++ b/go/cmd/zk/command/zip.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/internal/zkfilepath/zkfilepath.go b/go/cmd/zk/internal/zkfilepath/zkfilepath.go index 7febc7a9677..7d5b76cc664 100644 --- a/go/cmd/zk/internal/zkfilepath/zkfilepath.go +++ b/go/cmd/zk/internal/zkfilepath/zkfilepath.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/internal/zkfs/zkfs.go b/go/cmd/zk/internal/zkfs/zkfs.go index 9bab19ec1e4..648c6753fd6 100644 --- a/go/cmd/zk/internal/zkfs/zkfs.go +++ b/go/cmd/zk/internal/zkfs/zkfs.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go index f03ac41c6ef..f0c39d6b0f8 100644 --- a/go/cmd/zk/zkcmd.go +++ b/go/cmd/zk/zkcmd.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/event/event_test.go b/go/event/event_test.go index 36d4d56bf5d..f7356a98f3e 100644 --- a/go/event/event_test.go +++ b/go/event/event_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/event/hooks_test.go b/go/event/hooks_test.go index 197dd59a062..3d8e3361e94 100644 --- a/go/event/hooks_test.go +++ b/go/event/hooks_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go index 63c0942c069..852ca2a72a6 100644 --- a/go/event/syslogger/fake_logger.go +++ b/go/event/syslogger/fake_logger.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/event/syslogger/syslogger_test.go b/go/event/syslogger/syslogger_test.go index 4847fecac2a..fddeef12b27 100644 --- a/go/event/syslogger/syslogger_test.go +++ b/go/event/syslogger/syslogger_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/exit/exit_test.go b/go/exit/exit_test.go index e51a0938534..7f08f4ea1e2 100644 --- a/go/exit/exit_test.go +++ b/go/exit/exit_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/fileutil/wildcards_test.go b/go/fileutil/wildcards_test.go index 9d332a507db..e4494dd7b2c 100644 --- a/go/fileutil/wildcards_test.go +++ b/go/fileutil/wildcards_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/flagutil/enum.go b/go/flagutil/enum.go index 5bc279ee493..4426f13a8b6 100644 --- a/go/flagutil/enum.go +++ b/go/flagutil/enum.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go index ebf4ccef485..28d0b54e4ec 100644 --- a/go/flagutil/flagutil.go +++ b/go/flagutil/flagutil.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/flagutil/flagutil_test.go b/go/flagutil/flagutil_test.go index f95c46a53f7..df2b9bc3bd0 100644 --- a/go/flagutil/flagutil_test.go +++ b/go/flagutil/flagutil_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/history/history.go b/go/history/history.go index 94af347ec52..f38be9239e0 100644 --- a/go/history/history.go +++ b/go/history/history.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/history/history_test.go b/go/history/history_test.go index 8b0559bd47e..3e817b34bb2 100644 --- a/go/history/history_test.go +++ b/go/history/history_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/vt/discovery/utils_test.go b/go/vt/discovery/utils_test.go index 27416da44b0..edca8c17602 100644 --- a/go/vt/discovery/utils_test.go +++ b/go/vt/discovery/utils_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/vt/topo/topotests/shard_watch_test.go b/go/vt/topo/topotests/shard_watch_test.go index 80b696c106d..f4ac83c627a 100644 --- a/go/vt/topo/topotests/shard_watch_test.go +++ b/go/vt/topo/topotests/shard_watch_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/vt/vttest/plugin_consultopo.go b/go/vt/vttest/plugin_consultopo.go index cb10acc2cd2..3d47ee51681 100644 --- a/go/vt/vttest/plugin_consultopo.go +++ b/go/vt/vttest/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/vt/vttest/plugin_zk2topo.go b/go/vt/vttest/plugin_zk2topo.go index 3859454f7bd..7f1f81a5701 100644 --- a/go/vt/vttest/plugin_zk2topo.go +++ b/go/vt/vttest/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and From c368b5172ac8e48467125b12f54af7806e399853 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Wed, 29 Nov 2023 20:03:55 +0530 Subject: [PATCH 16/33] Add log for error to help debug (#14632) --- go/vt/vttablet/tabletserver/tabletserver.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 63f06b125d1..2e1831e0acd 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -246,6 +246,9 @@ func WaitForDBAGrants(config *tabletenv.TabletConfig, waitTime time.Duration) er conn, err := dbconnpool.NewDBConnection(ctx, config.DB.DbaConnector()) if err == nil { res, fetchErr := conn.ExecuteFetch("SHOW GRANTS", 1000, false) + if fetchErr != nil { + log.Errorf("Error running SHOW GRANTS - %v", fetchErr) + } if fetchErr == nil && res != nil && len(res.Rows) > 0 && len(res.Rows[0]) > 0 { privileges := res.Rows[0][0].ToString() // In MySQL 8.0, all the privileges are listed out explicitly, so we can search for SUPER in the output. 
From b3977186590e699e17fe06cb08085925743a0dca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Taylor?= Date: Wed, 29 Nov 2023 18:15:12 +0100 Subject: [PATCH 17/33] refactor the INSERT engine primitive (#14606) Signed-off-by: Andres Taylor Signed-off-by: Harshit Gangal Co-authored-by: Harshit Gangal --- go/vt/vtgate/engine/cached_size.go | 64 +- go/vt/vtgate/engine/insert.go | 889 ++---------------- go/vt/vtgate/engine/insert_common.go | 480 ++++++++++ go/vt/vtgate/engine/insert_select.go | 423 +++++++++ go/vt/vtgate/engine/insert_test.go | 273 +++--- go/vt/vtgate/planbuilder/insert.go | 29 +- .../planbuilder/operator_transformers.go | 56 +- .../planbuilder/testdata/dml_cases.json | 4 +- 8 files changed, 1224 insertions(+), 994 deletions(-) create mode 100644 go/vt/vtgate/engine/insert_common.go create mode 100644 go/vt/vtgate/engine/insert_select.go diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index 657e5323f05..807c7604412 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -386,10 +386,10 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(240) + size += int64(192) } - // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace - size += cached.Keyspace.CachedSize(true) + // field InsertCommon vitess.io/vitess/go/vt/vtgate/engine.InsertCommon + size += cached.InsertCommon.CachedSize(false) // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) // field VindexValues [][][]vitess.io/vitess/go/vt/vtgate/evalengine.Expr @@ -411,19 +411,6 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } } } - // field ColVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.ColVindexes)) * int64(8)) - for _, elem := range cached.ColVindexes { - size += elem.CachedSize(true) - } - } - // field TableName string - size += 
hack.RuntimeAllocSize(int64(len(cached.TableName))) - // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate - size += cached.Generate.CachedSize(true) - // field Prefix string - size += hack.RuntimeAllocSize(int64(len(cached.Prefix))) // field Mid vitess.io/vitess/go/vt/sqlparser.Values { size += hack.RuntimeAllocSize(int64(cap(cached.Mid)) * int64(24)) @@ -438,8 +425,49 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } } } + return size +} +func (cached *InsertCommon) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(128) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TableName string + size += hack.RuntimeAllocSize(int64(len(cached.TableName))) + // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate + size += cached.Generate.CachedSize(true) + // field ColVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + { + size += hack.RuntimeAllocSize(int64(cap(cached.ColVindexes)) * int64(8)) + for _, elem := range cached.ColVindexes { + size += elem.CachedSize(true) + } + } + // field Prefix string + size += hack.RuntimeAllocSize(int64(len(cached.Prefix))) // field Suffix string size += hack.RuntimeAllocSize(int64(len(cached.Suffix))) + return size +} +func (cached *InsertSelect) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(176) + } + // field InsertCommon vitess.io/vitess/go/vt/vtgate/engine.InsertCommon + size += cached.InsertCommon.CachedSize(false) + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } // field VindexValueOffset [][]int { size += hack.RuntimeAllocSize(int64(cap(cached.VindexValueOffset)) * int64(24)) @@ -449,10 +477,6 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } } } - // field 
Input vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Input.(cachedObject); ok { - size += cc.CachedSize(true) - } return size } diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index 4fa8eeadce4..46043413732 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -18,14 +18,10 @@ package engine import ( "context" - "encoding/json" "fmt" "strconv" "strings" - "sync" - "time" - "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,95 +36,41 @@ import ( var _ Primitive = (*Insert)(nil) -type ( - // Insert represents the instructions to perform an insert operation. - Insert struct { - // Opcode is the execution opcode. - Opcode InsertOpcode +// Insert represents the instructions to perform an insert operation. +type Insert struct { + InsertCommon - // Ignore is for INSERT IGNORE and INSERT...ON DUPLICATE KEY constructs - // for sharded cases. - Ignore bool + // Query specifies the query to be executed. + // For InsertSharded plans, this value is unused, + // and Prefix, Mid and Suffix are used instead. + Query string - // Keyspace specifies the keyspace to send the query to. - Keyspace *vindexes.Keyspace + // VindexValues specifies values for all the vindex columns. + // This is a three-dimensional data structure: + // Insert.Values[i] represents the values to be inserted for the i'th colvindex (i < len(Insert.Table.ColumnVindexes)) + // Insert.Values[i].Values[j] represents values for the j'th column of the given colVindex (j < len(colVindex[i].Columns) + // Insert.Values[i].Values[j].Values[k] represents the value pulled from row k for that column: (k < len(ins.rows)) + VindexValues [][][]evalengine.Expr - // Query specifies the query to be executed. - // For InsertSharded plans, this value is unused, - // and Prefix, Mid and Suffix are used instead. 
- Query string + // Mid is the row values for the sharded insert plans. + Mid sqlparser.Values - // VindexValues specifies values for all the vindex columns. - // This is a three-dimensional data structure: - // Insert.Values[i] represents the values to be inserted for the i'th colvindex (i < len(Insert.Table.ColumnVindexes)) - // Insert.Values[i].Values[j] represents values for the j'th column of the given colVindex (j < len(colVindex[i].Columns) - // Insert.Values[i].Values[j].Values[k] represents the value pulled from row k for that column: (k < len(ins.rows)) - VindexValues [][][]evalengine.Expr - - // ColVindexes are the vindexes that will use the VindexValues - ColVindexes []*vindexes.ColumnVindex - - // TableName is the name of the table on which row will be inserted. - TableName string - - // Generate is only set for inserts where a sequence must be generated. - Generate *Generate - - // Prefix, Mid and Suffix are for sharded insert plans. - Prefix string - Mid sqlparser.Values - Suffix string - - // Option to override the standard behavior and allow a multi-shard insert - // to use single round trip autocommit. - // - // This is a clear violation of the SQL semantics since it means the statement - // is not atomic in the presence of PK conflicts on one shard and not another. - // However some application use cases would prefer that the statement partially - // succeed in order to get the performance benefits of autocommit. - MultiShardAutocommit bool - - // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query - QueryTimeout int - - // VindexValueOffset stores the offset for each column in the ColumnVindex - // that will appear in the result set of the select query. - VindexValueOffset [][]int - - // Input is a select query plan to retrieve results for inserting data. - Input Primitive `json:",omitempty"` - - // ForceNonStreaming is true when the insert table and select table are same. 
- // This will avoid locking by the select table. - ForceNonStreaming bool - - PreventAutoCommit bool - - // Insert needs tx handling - txNeeded - } - - ksID = []byte -) - -func (ins *Insert) Inputs() ([]Primitive, []map[string]any) { - if ins.Input == nil { - return nil, nil - } - return []Primitive{ins.Input}, nil + noInputs } -// NewQueryInsert creates an Insert with a query string. -func NewQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { +// newQueryInsert creates an Insert with a query string. +func newQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { return &Insert{ - Opcode: opcode, - Keyspace: keyspace, - Query: query, + InsertCommon: InsertCommon{ + Opcode: opcode, + Keyspace: keyspace, + }, + Query: query, } } -// NewInsert creates a new Insert. -func NewInsert( +// newInsert creates a new Insert. +func newInsert( opcode InsertOpcode, ignore bool, keyspace *vindexes.Keyspace, @@ -139,13 +81,15 @@ func NewInsert( suffix string, ) *Insert { ins := &Insert{ - Opcode: opcode, - Ignore: ignore, - Keyspace: keyspace, + InsertCommon: InsertCommon{ + Opcode: opcode, + Keyspace: keyspace, + Ignore: ignore, + Prefix: prefix, + Suffix: suffix, + }, VindexValues: vindexValues, - Prefix: prefix, Mid: mid, - Suffix: suffix, } if table != nil { ins.TableName = table.Name.String() @@ -159,208 +103,59 @@ func NewInsert( return ins } -// Generate represents the instruction to generate -// a value from a sequence. -type Generate struct { - Keyspace *vindexes.Keyspace - Query string - // Values are the supplied values for the column, which - // will be stored as a list within the expression. New - // values will be generated based on how many were not - // supplied (NULL). - Values evalengine.Expr - // Insert using Select, offset for auto increment column - Offset int -} - -// InsertOpcode is a number representing the opcode -// for the Insert primitive. 
-type InsertOpcode int - -const ( - // InsertUnsharded is for routing an insert statement - // to an unsharded keyspace. - InsertUnsharded = InsertOpcode(iota) - // InsertSharded is for routing an insert statement - // to individual shards. Requires: A list of Values, one - // for each ColVindex. If the table has an Autoinc column, - // A Generate subplan must be created. - InsertSharded - // InsertSelect is for routing an insert statement - // based on rows returned from the select statement. - InsertSelect -) - -var insName = map[InsertOpcode]string{ - InsertUnsharded: "InsertUnsharded", - InsertSharded: "InsertSharded", - InsertSelect: "InsertSelect", -} - -// String returns the opcode -func (code InsertOpcode) String() string { - return strings.ReplaceAll(insName[code], "Insert", "") -} - -// MarshalJSON serializes the InsertOpcode as a JSON string. -// It's used for testing and diagnostics. -func (code InsertOpcode) MarshalJSON() ([]byte, error) { - return json.Marshal(insName[code]) -} - // RouteType returns a description of the query routing type used by the primitive func (ins *Insert) RouteType() string { return insName[ins.Opcode] } -// GetKeyspaceName specifies the Keyspace that this primitive routes to. -func (ins *Insert) GetKeyspaceName() string { - return ins.Keyspace.Name -} - -// GetTableName specifies the table that this primitive routes to. -func (ins *Insert) GetTableName() string { - return ins.TableName -} - // TryExecute performs a non-streaming exec. 
-func (ins *Insert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { +func (ins *Insert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) defer cancelFunc() switch ins.Opcode { case InsertUnsharded: - return ins.execInsertUnsharded(ctx, vcursor, bindVars) + return ins.insertIntoUnshardedTable(ctx, vcursor, bindVars) case InsertSharded: - return ins.execInsertSharded(ctx, vcursor, bindVars) - case InsertSelect: - return ins.execInsertFromSelect(ctx, vcursor, bindVars) + return ins.insertIntoShardedTable(ctx, vcursor, bindVars) default: - // Unreachable. - return nil, fmt.Errorf("unsupported query route: %v", ins) + return nil, vterrors.VT13001("unexpected query route: %v", ins.Opcode) } } // TryStreamExecute performs a streaming exec. func (ins *Insert) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - if ins.Input == nil || ins.ForceNonStreaming { - res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) - if err != nil { - return err - } - return callback(res) - } - if ins.QueryTimeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(ins.QueryTimeout)*time.Millisecond) - defer cancel() - } - - unsharded := ins.Opcode == InsertUnsharded - var mu sync.Mutex - output := &sqltypes.Result{} - - err := vcursor.StreamExecutePrimitiveStandalone(ctx, ins.Input, bindVars, false, func(result *sqltypes.Result) error { - if len(result.Rows) == 0 { - return nil - } - - // should process only one chunk at a time. - // as parallel chunk insert will try to use the same transaction in the vttablet - // this will cause transaction in use error. 
- mu.Lock() - defer mu.Unlock() - - var insertID int64 - var qr *sqltypes.Result - var err error - if unsharded { - insertID, qr, err = ins.insertIntoUnshardedTable(ctx, vcursor, bindVars, result) - } else { - insertID, qr, err = ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) - } - if err != nil { - return err - } - - output.RowsAffected += qr.RowsAffected - // InsertID needs to be updated to the least insertID value in sqltypes.Result - if output.InsertID == 0 || output.InsertID > uint64(insertID) { - output.InsertID = uint64(insertID) - } - return nil - }) + res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) if err != nil { return err } - return callback(output) + return callback(res) } -func (ins *Insert) insertIntoShardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, result *sqltypes.Result) (int64, *sqltypes.Result, error) { - insertID, err := ins.processGenerateFromRows(ctx, vcursor, result.Rows) - if err != nil { - return 0, nil, err - } - - rss, queries, err := ins.getInsertSelectQueries(ctx, vcursor, bindVars, result.Rows) +func (ins *Insert) insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + insertID, err := ins.processGenerateFromValues(ctx, vcursor, ins, bindVars) if err != nil { - return 0, nil, err - } - - qr, err := ins.executeInsertQueries(ctx, vcursor, rss, queries, insertID) - if err != nil { - return 0, nil, err - } - return insertID, qr, nil -} - -// GetFields fetches the field info. 
-func (ins *Insert) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable code for %q", ins.Query) -} - -func (ins *Insert) execInsertUnsharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - query := ins.Query - if ins.Input != nil { - result, err := vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) - if err != nil { - return nil, err - } - if len(result.Rows) == 0 { - return &sqltypes.Result{}, nil - } - query = ins.getInsertQueryForUnsharded(result, bindVars) + return nil, err } - _, qr, err := ins.executeUnshardedTableQuery(ctx, vcursor, bindVars, query) - return qr, err -} - -func (ins *Insert) getInsertQueryForUnsharded(result *sqltypes.Result, bindVars map[string]*querypb.BindVariable) string { - var mids sqlparser.Values - for r, inputRow := range result.Rows { - row := sqlparser.ValTuple{} - for c, value := range inputRow { - bvName := insertVarOffset(r, c) - bindVars[bvName] = sqltypes.ValueBindVariable(value) - row = append(row, sqlparser.NewArgument(bvName)) - } - mids = append(mids, row) - } - return ins.Prefix + sqlparser.String(mids) + ins.Suffix + return ins.executeUnshardedTableQuery(ctx, vcursor, ins, bindVars, ins.Query, uint64(insertID)) } -func (ins *Insert) execInsertSharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - insertID, err := ins.processGenerateFromValues(ctx, vcursor, bindVars) +func (ins *Insert) insertIntoShardedTable( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, +) (*sqltypes.Result, error) { + insertID, err := ins.processGenerateFromValues(ctx, vcursor, ins, bindVars) if err != nil { return nil, err } - rss, queries, err := ins.getInsertShardedRoute(ctx, vcursor, bindVars) + rss, queries, err := ins.getInsertShardedQueries(ctx, vcursor, 
bindVars) if err != nil { return nil, err } - return ins.executeInsertQueries(ctx, vcursor, rss, queries, insertID) + return ins.executeInsertQueries(ctx, vcursor, rss, queries, uint64(insertID)) } func (ins *Insert) executeInsertQueries( @@ -368,7 +163,7 @@ func (ins *Insert) executeInsertQueries( vcursor VCursor, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, - insertID int64, + insertID uint64, ) (*sqltypes.Result, error) { autocommit := (len(rss) == 1 || ins.MultiShardAutocommit) && vcursor.AutocommitApproval() err := allowOnlyPrimary(rss...) @@ -381,347 +176,51 @@ func (ins *Insert) executeInsertQueries( } if insertID != 0 { - result.InsertID = uint64(insertID) + result.InsertID = insertID } return result, nil } -func (ins *Insert) getInsertSelectQueries( - ctx context.Context, - vcursor VCursor, - bindVars map[string]*querypb.BindVariable, - rows []sqltypes.Row, -) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { - colVindexes := ins.ColVindexes - if len(colVindexes) != len(ins.VindexValueOffset) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") - } - - // Here we go over the incoming rows and extract values for the vindexes we need to update - shardingCols := make([][]sqltypes.Row, len(colVindexes)) - for _, inputRow := range rows { - for colIdx := range colVindexes { - offsets := ins.VindexValueOffset[colIdx] - row := make(sqltypes.Row, 0, len(offsets)) - for _, offset := range offsets { - if offset == -1 { // value not provided from select query - row = append(row, sqltypes.NULL) - continue - } - row = append(row, inputRow[offset]) - } - shardingCols[colIdx] = append(shardingCols[colIdx], row) - } - } - - keyspaceIDs, err := ins.processPrimary(ctx, vcursor, shardingCols[0], colVindexes[0]) - if err != nil { - return nil, nil, err - } - - for vIdx := 1; vIdx < len(colVindexes); vIdx++ { - colVindex := colVindexes[vIdx] - var err error - if colVindex.Owned { - err = 
ins.processOwned(ctx, vcursor, shardingCols[vIdx], colVindex, keyspaceIDs) - } else { - err = ins.processUnowned(ctx, vcursor, shardingCols[vIdx], colVindex, keyspaceIDs) - } - if err != nil { - return nil, nil, err - } - } - - var indexes []*querypb.Value - var destinations []key.Destination - for i, ksid := range keyspaceIDs { - if ksid != nil { - indexes = append(indexes, &querypb.Value{ - Value: strconv.AppendInt(nil, int64(i), 10), - }) - destinations = append(destinations, key.DestinationKeyspaceID(ksid)) - } - } - if len(destinations) == 0 { - // In this case, all we have is nil KeyspaceIds, we don't do - // anything at all. - return nil, nil, nil - } - - rss, indexesPerRss, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, indexes, destinations) - if err != nil { - return nil, nil, err - } - - queries := make([]*querypb.BoundQuery, len(rss)) - for i := range rss { - bvs := sqltypes.CopyBindVariables(bindVars) // we don't want to create one huge bindvars for all values - var mids sqlparser.Values - for _, indexValue := range indexesPerRss[i] { - index, _ := strconv.Atoi(string(indexValue.Value)) - if keyspaceIDs[index] != nil { - row := sqlparser.ValTuple{} - for colOffset, value := range rows[index] { - bvName := insertVarOffset(index, colOffset) - bvs[bvName] = sqltypes.ValueBindVariable(value) - row = append(row, sqlparser.NewArgument(bvName)) - } - mids = append(mids, row) - } - } - rewritten := ins.Prefix + sqlparser.String(mids) + ins.Suffix - queries[i] = &querypb.BoundQuery{ - Sql: rewritten, - BindVariables: bvs, - } - } - - return rss, queries, nil -} - -func (ins *Insert) execInsertFromSelect(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - // run the SELECT query - if ins.Input == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "something went wrong planning INSERT SELECT") - } - - result, err := vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) - if err != 
nil { - return nil, err - } - if len(result.Rows) == 0 { - return &sqltypes.Result{}, nil - } - - _, qr, err := ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) - return qr, err -} - -// shouldGenerate determines if a sequence value should be generated for a given value -func shouldGenerate(v sqltypes.Value) bool { - if v.IsNull() { - return true - } - - // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also - // treats 0 as a value that should generate a new sequence. - n, err := v.ToCastUint64() - if err == nil && n == 0 { - return true - } - - return false -} - -// processGenerateFromValues generates new values using a sequence if necessary. -// If no value was generated, it returns 0. Values are generated only -// for cases where none are supplied. -func (ins *Insert) processGenerateFromValues( - ctx context.Context, - vcursor VCursor, - bindVars map[string]*querypb.BindVariable, -) (insertID int64, err error) { - if ins.Generate == nil { - return 0, nil - } - - // Scan input values to compute the number of values to generate, and - // keep track of where they should be filled. - env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) - resolved, err := env.Evaluate(ins.Generate.Values) - if err != nil { - return 0, err - } - count := int64(0) - values := resolved.TupleValues() - for _, val := range values { - if shouldGenerate(val) { - count++ - } - } - - // If generation is needed, generate the requested number of values (as one call). 
- if count != 0 { - rss, _, err := vcursor.ResolveDestinations(ctx, ins.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) - if err != nil { - return 0, err - } - if len(rss) != 1 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) - } - bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)} - qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0]) - if err != nil { - return 0, err - } - // If no rows are returned, it's an internal error, and the code - // must panic, which will be caught and reported. - insertID, err = qr.Rows[0][0].ToCastInt64() - if err != nil { - return 0, err - } - } - - // Fill the holes where no value was supplied. - cur := insertID - for i, v := range values { - if shouldGenerate(v) { - bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.Int64BindVariable(cur) - cur++ - } else { - bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.ValueBindVariable(v) - } - } - return insertID, nil -} - -// processGenerateFromRows generates new values using a sequence if necessary. -// If no value was generated, it returns 0. Values are generated only -// for cases where none are supplied. -func (ins *Insert) processGenerateFromRows( - ctx context.Context, - vcursor VCursor, - rows []sqltypes.Row, -) (insertID int64, err error) { - if ins.Generate == nil { - return 0, nil - } - var count int64 - offset := ins.Generate.Offset - genColPresent := offset < len(rows[0]) - if genColPresent { - for _, val := range rows { - if val[offset].IsNull() { - count++ - } - } - } else { - count = int64(len(rows)) - } - - if count == 0 { - return 0, nil - } - - // If generation is needed, generate the requested number of values (as one call). 
- rss, _, err := vcursor.ResolveDestinations(ctx, ins.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) - if err != nil { - return 0, err - } - if len(rss) != 1 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) - } - bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)} - qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0]) - if err != nil { - return 0, err - } - // If no rows are returned, it's an internal error, and the code - // must panic, which will be caught and reported. - insertID, err = qr.Rows[0][0].ToCastInt64() - if err != nil { - return 0, err - } - - used := insertID - for idx, val := range rows { - if genColPresent { - if val[offset].IsNull() { - val[offset] = sqltypes.NewInt64(used) - used++ - } - } else { - rows[idx] = append(val, sqltypes.NewInt64(used)) - used++ - } - } - - return insertID, nil -} - -// getInsertShardedRoute performs all the vindex related work +// getInsertShardedQueries performs all the vindex related work // and returns a map of shard to queries. // Using the primary vindex, it computes the target keyspace ids. // For owned vindexes, it creates entries. // For unowned vindexes with no input values, it reverse maps. // For unowned vindexes with values, it validates. // If it's an IGNORE or ON DUPLICATE key insert, it drops unroutable rows. -func (ins *Insert) getInsertShardedRoute( +func (ins *Insert) getInsertShardedQueries( ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, ) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { + // vindexRowsValues builds the values of all vindex columns. // the 3-d structure indexes are colVindex, row, col. Note that // ins.Values indexes are colVindex, col, row. So, the conversion // involves a transpose. 
// The reason we need to transpose is that all the Vindex APIs // require inputs in that format. - vindexRowsValues := make([][]sqltypes.Row, len(ins.VindexValues)) - rowCount := 0 - env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) - colVindexes := ins.ColVindexes - for vIdx, vColValues := range ins.VindexValues { - if len(vColValues) != len(colVindexes[vIdx].Columns) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) - } - for colIdx, colValues := range vColValues { - rowsResolvedValues := make(sqltypes.Row, 0, len(colValues)) - for _, colValue := range colValues { - result, err := env.Evaluate(colValue) - if err != nil { - return nil, nil, err - } - rowsResolvedValues = append(rowsResolvedValues, result.Value(vcursor.ConnCollation())) - } - // This is the first iteration: allocate for transpose. - if colIdx == 0 { - if len(rowsResolvedValues) == 0 { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] rowcount is zero for inserts: %v", rowsResolvedValues) - } - if rowCount == 0 { - rowCount = len(rowsResolvedValues) - } - if rowCount != len(rowsResolvedValues) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) - } - vindexRowsValues[vIdx] = make([]sqltypes.Row, rowCount) - } - // Perform the transpose. - for rowNum, colVal := range rowsResolvedValues { - vindexRowsValues[vIdx][rowNum] = append(vindexRowsValues[vIdx][rowNum], colVal) - } - } + vindexRowsValues, err := ins.buildVindexRowsValues(ctx, vcursor, bindVars) + if err != nil { + return nil, nil, err } // The output from the following 'process' functions is a list of // keyspace ids. For regular inserts, a failure to find a route // results in an error. For 'ignore' type inserts, the keyspace // id is returned as nil, which is used later to drop the corresponding rows. 
- if len(vindexRowsValues) == 0 || len(colVindexes) == 0 { + if len(vindexRowsValues) == 0 || len(ins.ColVindexes) == 0 { return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.TableName) } - keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) + + keyspaceIDs, err := ins.processVindexes(ctx, vcursor, vindexRowsValues) if err != nil { return nil, nil, err } - for vIdx := 1; vIdx < len(colVindexes); vIdx++ { - colVindex := colVindexes[vIdx] - var err error - if colVindex.Owned { - err = ins.processOwned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) - } else { - err = ins.processUnowned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) - } - if err != nil { - return nil, nil, err - } - } - // Build 3-d bindvars. Skip rows with nil keyspace ids in case // we're executing an insert ignore. - for vIdx, colVindex := range colVindexes { + for vIdx, colVindex := range ins.ColVindexes { for rowNum, rowColumnKeys := range vindexRowsValues[vIdx] { if keyspaceIDs[rowNum] == nil { // InsertIgnore: skip the row. @@ -794,174 +293,50 @@ func (ins *Insert) getInsertShardedRoute( return rss, queries, nil } -// processPrimary maps the primary vindex values to the keyspace ids. -func (ins *Insert) processPrimary(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex) ([]ksID, error) { - destinations, err := vindexes.Map(ctx, colVindex.Vindex, vcursor, vindexColumnsKeys) - if err != nil { - return nil, err - } - - keyspaceIDs := make([]ksID, len(destinations)) - for i, destination := range destinations { - switch d := destination.(type) { - case key.DestinationKeyspaceID: - // This is a single keyspace id, we're good. - keyspaceIDs[i] = d - case key.DestinationNone: - // No valid keyspace id, we may return an error. 
- if !ins.Ignore { - return nil, fmt.Errorf("could not map %v to a keyspace id", vindexColumnsKeys[i]) - } - default: - return nil, fmt.Errorf("could not map %v to a unique keyspace id: %v", vindexColumnsKeys[i], destination) - } - } - - return keyspaceIDs, nil -} - -// processOwned creates vindex entries for the values of an owned column. -func (ins *Insert) processOwned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { - if !ins.Ignore { - return colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, vindexColumnsKeys, ksids, false /* ignoreMode */) - } - - // InsertIgnore - var createIndexes []int - var createKeys []sqltypes.Row - var createKsids []ksID - - for rowNum, rowColumnKeys := range vindexColumnsKeys { - if ksids[rowNum] == nil { - continue - } - createIndexes = append(createIndexes, rowNum) - createKeys = append(createKeys, rowColumnKeys) - createKsids = append(createKsids, ksids[rowNum]) - } - if createKeys == nil { - return nil - } - - err := colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, createKeys, createKsids, true) - if err != nil { - return err - } - // After creation, verify that the keys map to the keyspace ids. If not, remove - // those that don't map. - verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, createKeys, createKsids) - if err != nil { - return err - } - for i, v := range verified { - if !v { - ksids[createIndexes[i]] = nil - } - } - return nil -} - -// processUnowned either reverse maps or validates the values for an unowned column. -func (ins *Insert) processUnowned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { - var reverseIndexes []int - var reverseKsids []ksID - - var verifyIndexes []int - var verifyKeys []sqltypes.Row - var verifyKsids []ksID - - // Check if this VIndex is reversible or not. 
- reversibleVindex, isReversible := colVindex.Vindex.(vindexes.Reversible) - - for rowNum, rowColumnKeys := range vindexColumnsKeys { - // If we weren't able to determine a keyspace id from the primary VIndex, skip this row - if ksids[rowNum] == nil { - continue +func (ins *Insert) buildVindexRowsValues(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([][]sqltypes.Row, error) { + vindexRowsValues := make([][]sqltypes.Row, len(ins.VindexValues)) + rowCount := 0 + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + colVindexes := ins.ColVindexes + for vIdx, vColValues := range ins.VindexValues { + if len(vColValues) != len(colVindexes[vIdx].Columns) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) } - - if rowColumnKeys[0].IsNull() { - // If the value of the column is `NULL`, but this is a reversible VIndex, - // we will try to generate the value from the keyspace id generated by the primary VIndex. - if isReversible { - reverseIndexes = append(reverseIndexes, rowNum) - reverseKsids = append(reverseKsids, ksids[rowNum]) + for colIdx, colValues := range vColValues { + rowsResolvedValues := make(sqltypes.Row, 0, len(colValues)) + for _, colValue := range colValues { + result, err := env.Evaluate(colValue) + if err != nil { + return nil, err + } + rowsResolvedValues = append(rowsResolvedValues, result.Value(vcursor.ConnCollation())) } - - // Otherwise, don't do anything. Whether `NULL` is a valid value for this column will be - // handled by MySQL. 
- } else { - // If a value for this column was specified, the keyspace id values from the - // secondary VIndex need to be verified against the keyspace id from the primary VIndex - verifyIndexes = append(verifyIndexes, rowNum) - verifyKeys = append(verifyKeys, rowColumnKeys) - verifyKsids = append(verifyKsids, ksids[rowNum]) - } - } - - // Reverse map values for secondary VIndex columns from the primary VIndex's keyspace id. - if reverseKsids != nil { - reverseKeys, err := reversibleVindex.ReverseMap(vcursor, reverseKsids) - if err != nil { - return err - } - - for i, reverseKey := range reverseKeys { - // Fill the first column with the reverse-mapped value. - vindexColumnsKeys[reverseIndexes[i]][0] = reverseKey - } - } - - // Verify that the keyspace ids generated by the primary and secondary VIndexes match - if verifyIndexes != nil { - // If values were supplied, we validate against keyspace id. - verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, verifyKeys, verifyKsids) - if err != nil { - return err - } - - var mismatchVindexKeys []sqltypes.Row - for i, v := range verified { - rowNum := verifyIndexes[i] - if !v { - if !ins.Ignore { - mismatchVindexKeys = append(mismatchVindexKeys, vindexColumnsKeys[rowNum]) - continue + // This is the first iteration: allocate for transpose. + if colIdx == 0 { + if len(rowsResolvedValues) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] rowcount is zero for inserts: %v", rowsResolvedValues) } - - // Skip the whole row if this is a `INSERT IGNORE` or `INSERT ... ON DUPLICATE KEY ...` statement - // but the keyspace ids didn't match. - ksids[verifyIndexes[i]] = nil + if rowCount == 0 { + rowCount = len(rowsResolvedValues) + } + if rowCount != len(rowsResolvedValues) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) + } + vindexRowsValues[vIdx] = make([]sqltypes.Row, rowCount) + } + // Perform the transpose. 
+ for rowNum, colVal := range rowsResolvedValues { + vindexRowsValues[vIdx][rowNum] = append(vindexRowsValues[vIdx][rowNum], colVal) } - } - - if mismatchVindexKeys != nil { - return fmt.Errorf("values %v for column %v does not map to keyspace ids", mismatchVindexKeys, colVindex.Columns) } } - - return nil -} - -// InsertVarName returns a name for the bind var for this column. This method is used by the planner and engine, -// to make sure they both produce the same names -func InsertVarName(col sqlparser.IdentifierCI, rowNum int) string { - return fmt.Sprintf("_%s_%d", col.CompliantName(), rowNum) -} - -func insertVarOffset(rowNum, colOffset int) string { - return fmt.Sprintf("_c%d_%d", rowNum, colOffset) + return vindexRowsValues, nil } func (ins *Insert) description() PrimitiveDescription { - other := map[string]any{ - "Query": ins.Query, - "TableName": ins.GetTableName(), - "MultiShardAutocommit": ins.MultiShardAutocommit, - "QueryTimeout": ins.QueryTimeout, - "InsertIgnore": ins.Ignore, - "InputAsNonStreaming": ins.ForceNonStreaming, - "NoAutoCommit": ins.PreventAutoCommit, - } + other := ins.commonDesc() + other["Query"] = ins.Query + other["TableName"] = ins.GetTableName() if len(ins.VindexValues) > 0 { valuesOffsets := map[string]string{} @@ -984,35 +359,6 @@ func (ins *Insert) description() PrimitiveDescription { other["VindexValues"] = valuesOffsets } - if ins.Generate != nil { - if ins.Generate.Values == nil { - other["AutoIncrement"] = fmt.Sprintf("%s:Offset(%d)", ins.Generate.Query, ins.Generate.Offset) - } else { - other["AutoIncrement"] = fmt.Sprintf("%s:Values::%s", ins.Generate.Query, sqlparser.String(ins.Generate.Values)) - } - } - - if len(ins.VindexValueOffset) > 0 { - valuesOffsets := map[string]string{} - for idx, ints := range ins.VindexValueOffset { - if len(ins.ColVindexes) < idx { - panic("ins.ColVindexes and ins.VindexValueOffset do not line up") - } - vindex := ins.ColVindexes[idx] - marshal, _ := json.Marshal(ints) - 
valuesOffsets[vindex.Name] = string(marshal) - } - other["VindexOffsetFromSelect"] = valuesOffsets - } - if len(ins.Mid) > 0 { - mids := slice.Map(ins.Mid, func(from sqlparser.ValTuple) string { - return sqlparser.String(from) - }) - shardQuery := fmt.Sprintf("%s%s%s", ins.Prefix, strings.Join(mids, ", "), ins.Suffix) - if shardQuery != ins.Query { - other["ShardedQuery"] = shardQuery - } - } return PrimitiveDescription{ OperatorType: "Insert", Keyspace: ins.Keyspace, @@ -1022,41 +368,8 @@ func (ins *Insert) description() PrimitiveDescription { } } -func (ins *Insert) insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, result *sqltypes.Result) (int64, *sqltypes.Result, error) { - query := ins.getInsertQueryForUnsharded(result, bindVars) - return ins.executeUnshardedTableQuery(ctx, vcursor, bindVars, query) -} - -func (ins *Insert) executeUnshardedTableQuery(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, query string) (int64, *sqltypes.Result, error) { - insertID, err := ins.processGenerateFromValues(ctx, vcursor, bindVars) - if err != nil { - return 0, nil, err - } - - rss, _, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) - if err != nil { - return 0, nil, err - } - if len(rss) != 1 { - return 0, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) - } - err = allowOnlyPrimary(rss...) - if err != nil { - return 0, nil, err - } - qr, err := execShard(ctx, ins, vcursor, query, bindVars, rss[0], true, !ins.PreventAutoCommit /* canAutocommit */) - if err != nil { - return 0, nil, err - } - - // If processGenerateFromValues generated new values, it supersedes - // any ids that MySQL might have generated. If both generated - // values, we don't return an error because this behavior - // is required to support migration. 
- if insertID != 0 { - qr.InsertID = uint64(insertID) - } else { - insertID = int64(qr.InsertID) - } - return insertID, qr, nil +// InsertVarName returns a name for the bind var for this column. This method is used by the planner and engine, +// to make sure they both produce the same names +func InsertVarName(col sqlparser.IdentifierCI, rowNum int) string { + return fmt.Sprintf("_%s_%d", col.CompliantName(), rowNum) } diff --git a/go/vt/vtgate/engine/insert_common.go b/go/vt/vtgate/engine/insert_common.go new file mode 100644 index 00000000000..d0a14feec26 --- /dev/null +++ b/go/vt/vtgate/engine/insert_common.go @@ -0,0 +1,480 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +type ( + InsertCommon struct { + // Opcode is the execution opcode. + Opcode InsertOpcode + + // Keyspace specifies the keyspace to send the query to. + Keyspace *vindexes.Keyspace + + // Ignore is for INSERT IGNORE and INSERT...ON DUPLICATE KEY constructs + // for sharded cases. + Ignore bool + + // TableName is the name of the table on which row will be inserted. 
+ TableName string + + // Option to override the standard behavior and allow a multi-shard insert + // to use single round trip autocommit. + // + // This is a clear violation of the SQL semantics since it means the statement + // is not atomic in the presence of PK conflicts on one shard and not another. + // However, some application use cases would prefer that the statement partially + // succeed in order to get the performance benefits of autocommit. + MultiShardAutocommit bool + + // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query + QueryTimeout int + + // ForceNonStreaming is true when the insert table and select table are same. + // This will avoid locking by the select table. + ForceNonStreaming bool + + PreventAutoCommit bool + + // Generate is only set for inserts where a sequence must be generated. + Generate *Generate + + // ColVindexes are the vindexes that will use the VindexValues + ColVindexes []*vindexes.ColumnVindex + + // Prefix, Suffix are for sharded insert plans. + Prefix string + Suffix string + + // Insert needs tx handling + txNeeded + } + + ksID = []byte + + // Generate represents the instruction to generate + // a value from a sequence. + Generate struct { + Keyspace *vindexes.Keyspace + Query string + // Values are the supplied values for the column, which + // will be stored as a list within the expression. New + // values will be generated based on how many were not + // supplied (NULL). + Values evalengine.Expr + // Insert using Select, offset for auto increment column + Offset int + } + + // InsertOpcode is a number representing the opcode + // for the Insert primitive. + InsertOpcode int +) + +const nextValBV = "n" + +const ( + // InsertUnsharded is for routing an insert statement + // to an unsharded keyspace. + InsertUnsharded = InsertOpcode(iota) + // InsertSharded is for routing an insert statement + // to individual shards. Requires: A list of Values, one + // for each ColVindex. 
If the table has an Autoinc column, + // A Generate subplan must be created. + InsertSharded +) + +var insName = map[InsertOpcode]string{ + InsertUnsharded: "InsertUnsharded", + InsertSharded: "InsertSharded", +} + +// String returns the opcode +func (code InsertOpcode) String() string { + return strings.ReplaceAll(insName[code], "Insert", "") +} + +// MarshalJSON serializes the InsertOpcode as a JSON string. +// It's used for testing and diagnostics. +func (code InsertOpcode) MarshalJSON() ([]byte, error) { + return json.Marshal(insName[code]) +} + +// GetKeyspaceName specifies the Keyspace that this primitive routes to. +func (ic *InsertCommon) GetKeyspaceName() string { + return ic.Keyspace.Name +} + +// GetTableName specifies the table that this primitive routes to. +func (ic *InsertCommon) GetTableName() string { + return ic.TableName +} + +// GetFields fetches the field info. +func (ic *InsertCommon) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT13001("unexpected fields call for insert query") +} + +func (ins *InsertCommon) executeUnshardedTableQuery(ctx context.Context, vcursor VCursor, loggingPrimitive Primitive, bindVars map[string]*querypb.BindVariable, query string, insertID uint64) (*sqltypes.Result, error) { + rss, _, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) + if err != nil { + return nil, err + } + if len(rss) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) + } + err = allowOnlyPrimary(rss...) + if err != nil { + return nil, err + } + qr, err := execShard(ctx, loggingPrimitive, vcursor, query, bindVars, rss[0], true, !ins.PreventAutoCommit /* canAutocommit */) + if err != nil { + return nil, err + } + + // If processGenerateFromValues generated new values, it supersedes + // any ids that MySQL might have generated. 
If both generated + // values, we don't return an error because this behavior + // is required to support migration. + if insertID != 0 { + qr.InsertID = insertID + } + return qr, nil +} + +func (ins *InsertCommon) processVindexes(ctx context.Context, vcursor VCursor, vindexRowsValues [][]sqltypes.Row) ([]ksID, error) { + colVindexes := ins.ColVindexes + keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) + if err != nil { + return nil, err + } + + for vIdx := 1; vIdx < len(colVindexes); vIdx++ { + colVindex := colVindexes[vIdx] + if colVindex.Owned { + err = ins.processOwned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) + } else { + err = ins.processUnowned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) + } + if err != nil { + return nil, err + } + } + return keyspaceIDs, nil +} + +// processPrimary maps the primary vindex values to the keyspace ids. +func (ic *InsertCommon) processPrimary(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex) ([]ksID, error) { + destinations, err := vindexes.Map(ctx, colVindex.Vindex, vcursor, vindexColumnsKeys) + if err != nil { + return nil, err + } + + keyspaceIDs := make([]ksID, len(destinations)) + for i, destination := range destinations { + switch d := destination.(type) { + case key.DestinationKeyspaceID: + // This is a single keyspace id, we're good. + keyspaceIDs[i] = d + case key.DestinationNone: + // No valid keyspace id, we may return an error. + if !ic.Ignore { + return nil, fmt.Errorf("could not map %v to a keyspace id", vindexColumnsKeys[i]) + } + default: + return nil, fmt.Errorf("could not map %v to a unique keyspace id: %v", vindexColumnsKeys[i], destination) + } + } + + return keyspaceIDs, nil +} + +// processOwned creates vindex entries for the values of an owned column. 
+func (ic *InsertCommon) processOwned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { + if !ic.Ignore { + return colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, vindexColumnsKeys, ksids, false /* ignoreMode */) + } + + // InsertIgnore + var createIndexes []int + var createKeys []sqltypes.Row + var createKsids []ksID + + for rowNum, rowColumnKeys := range vindexColumnsKeys { + if ksids[rowNum] == nil { + continue + } + createIndexes = append(createIndexes, rowNum) + createKeys = append(createKeys, rowColumnKeys) + createKsids = append(createKsids, ksids[rowNum]) + } + if createKeys == nil { + return nil + } + + err := colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, createKeys, createKsids, true) + if err != nil { + return err + } + // After creation, verify that the keys map to the keyspace ids. If not, remove + // those that don't map. + verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, createKeys, createKsids) + if err != nil { + return err + } + for i, v := range verified { + if !v { + ksids[createIndexes[i]] = nil + } + } + return nil +} + +// processUnowned either reverse maps or validates the values for an unowned column. +func (ic *InsertCommon) processUnowned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { + var reverseIndexes []int + var reverseKsids []ksID + + var verifyIndexes []int + var verifyKeys []sqltypes.Row + var verifyKsids []ksID + + // Check if this VIndex is reversible or not. 
+ reversibleVindex, isReversible := colVindex.Vindex.(vindexes.Reversible) + + for rowNum, rowColumnKeys := range vindexColumnsKeys { + // If we weren't able to determine a keyspace id from the primary VIndex, skip this row + if ksids[rowNum] == nil { + continue + } + + if rowColumnKeys[0].IsNull() { + // If the value of the column is `NULL`, but this is a reversible VIndex, + // we will try to generate the value from the keyspace id generated by the primary VIndex. + if isReversible { + reverseIndexes = append(reverseIndexes, rowNum) + reverseKsids = append(reverseKsids, ksids[rowNum]) + } + + // Otherwise, don't do anything. Whether `NULL` is a valid value for this column will be + // handled by MySQL. + } else { + // If a value for this column was specified, the keyspace id values from the + // secondary VIndex need to be verified against the keyspace id from the primary VIndex + verifyIndexes = append(verifyIndexes, rowNum) + verifyKeys = append(verifyKeys, rowColumnKeys) + verifyKsids = append(verifyKsids, ksids[rowNum]) + } + } + + // Reverse map values for secondary VIndex columns from the primary VIndex's keyspace id. + if reverseKsids != nil { + reverseKeys, err := reversibleVindex.ReverseMap(vcursor, reverseKsids) + if err != nil { + return err + } + + for i, reverseKey := range reverseKeys { + // Fill the first column with the reverse-mapped value. + vindexColumnsKeys[reverseIndexes[i]][0] = reverseKey + } + } + + // Verify that the keyspace ids generated by the primary and secondary VIndexes match + if verifyIndexes != nil { + // If values were supplied, we validate against keyspace id. 
+ verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, verifyKeys, verifyKsids) + if err != nil { + return err + } + + var mismatchVindexKeys []sqltypes.Row + for i, v := range verified { + rowNum := verifyIndexes[i] + if !v { + if !ic.Ignore { + mismatchVindexKeys = append(mismatchVindexKeys, vindexColumnsKeys[rowNum]) + continue + } + + // Skip the whole row if this is a `INSERT IGNORE` or `INSERT ... ON DUPLICATE KEY ...` statement + // but the keyspace ids didn't match. + ksids[verifyIndexes[i]] = nil + } + } + + if mismatchVindexKeys != nil { + return fmt.Errorf("values %v for column %v does not map to keyspace ids", mismatchVindexKeys, colVindex.Columns) + } + } + + return nil +} + +// processGenerateFromSelect generates new values using a sequence if necessary. +// If no value was generated, it returns 0. Values are generated only +// for cases where none are supplied. +func (ic *InsertCommon) processGenerateFromSelect( + ctx context.Context, + vcursor VCursor, + loggingPrimitive Primitive, + rows []sqltypes.Row, +) (insertID int64, err error) { + if ic.Generate == nil { + return 0, nil + } + var count int64 + offset := ic.Generate.Offset + genColPresent := offset < len(rows[0]) + if genColPresent { + for _, row := range rows { + if shouldGenerate(row[offset], evalengine.ParseSQLMode(vcursor.SQLMode())) { + count++ + } + } + } else { + count = int64(len(rows)) + } + + if count == 0 { + return 0, nil + } + + insertID, err = ic.execGenerate(ctx, vcursor, loggingPrimitive, count) + if err != nil { + return 0, err + } + + used := insertID + for idx, val := range rows { + if genColPresent { + if shouldGenerate(val[offset], evalengine.ParseSQLMode(vcursor.SQLMode())) { + val[offset] = sqltypes.NewInt64(used) + used++ + } + } else { + rows[idx] = append(val, sqltypes.NewInt64(used)) + used++ + } + } + + return insertID, nil +} + +// processGenerateFromValues generates new values using a sequence if necessary. +// If no value was generated, it returns 0. 
Values are generated only +// for cases where none are supplied. +func (ic *InsertCommon) processGenerateFromValues( + ctx context.Context, + vcursor VCursor, + loggingPrimitive Primitive, + bindVars map[string]*querypb.BindVariable, +) (insertID int64, err error) { + if ic.Generate == nil { + return 0, nil + } + + // Scan input values to compute the number of values to generate, and + // keep track of where they should be filled. + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + resolved, err := env.Evaluate(ic.Generate.Values) + if err != nil { + return 0, err + } + count := int64(0) + values := resolved.TupleValues() + for _, val := range values { + if shouldGenerate(val, evalengine.ParseSQLMode(vcursor.SQLMode())) { + count++ + } + } + + // If generation is needed, generate the requested number of values (as one call). + if count != 0 { + insertID, err = ic.execGenerate(ctx, vcursor, loggingPrimitive, count) + if err != nil { + return 0, err + } + } + + // Fill the holes where no value was supplied. + cur := insertID + for i, v := range values { + if shouldGenerate(v, evalengine.ParseSQLMode(vcursor.SQLMode())) { + bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.Int64BindVariable(cur) + cur++ + } else { + bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.ValueBindVariable(v) + } + } + return insertID, nil +} + +func (ic *InsertCommon) execGenerate(ctx context.Context, vcursor VCursor, loggingPrimitive Primitive, count int64) (int64, error) { + // If generation is needed, generate the requested number of values (as one call). 
+ rss, _, err := vcursor.ResolveDestinations(ctx, ic.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) + if err != nil { + return 0, err + } + if len(rss) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) + } + bindVars := map[string]*querypb.BindVariable{nextValBV: sqltypes.Int64BindVariable(count)} + qr, err := vcursor.ExecuteStandalone(ctx, loggingPrimitive, ic.Generate.Query, bindVars, rss[0]) + if err != nil { + return 0, err + } + // If no rows are returned, it's an internal error, and the code + // must panic, which will be caught and reported. + return qr.Rows[0][0].ToCastInt64() +} + +// shouldGenerate determines if a sequence value should be generated for a given value +func shouldGenerate(v sqltypes.Value, sqlmode evalengine.SQLMode) bool { + if v.IsNull() { + return true + } + + // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also + // treats 0 as a value that should generate a new sequence. + value, err := evalengine.CoerceTo(v, sqltypes.Uint64, sqlmode) + if err != nil { + return false + } + + id, err := value.ToCastUint64() + if err != nil { + return false + } + + return id == 0 +} diff --git a/go/vt/vtgate/engine/insert_select.go b/go/vt/vtgate/engine/insert_select.go new file mode 100644 index 00000000000..d36176922dc --- /dev/null +++ b/go/vt/vtgate/engine/insert_select.go @@ -0,0 +1,423 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "sync" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*InsertSelect)(nil) + +type ( + // InsertSelect represents the instructions to perform an insert operation with input rows from a select. + InsertSelect struct { + InsertCommon + + // Input is a select query plan to retrieve results for inserting data. + Input Primitive + + // VindexValueOffset stores the offset for each column in the ColumnVindex + // that will appear in the result set of the select query. + VindexValueOffset [][]int + } +) + +// newInsertSelect creates a new InsertSelect. +func newInsertSelect( + ignore bool, + keyspace *vindexes.Keyspace, + table *vindexes.Table, + prefix string, + suffix string, + vv [][]int, + input Primitive, +) *InsertSelect { + ins := &InsertSelect{ + InsertCommon: InsertCommon{ + Ignore: ignore, + Keyspace: keyspace, + Prefix: prefix, + Suffix: suffix, + }, + Input: input, + VindexValueOffset: vv, + } + if table != nil { + ins.TableName = table.Name.String() + for _, colVindex := range table.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + ins.ColVindexes = append(ins.ColVindexes, colVindex) + } + } + return ins +} + +func (ins *InsertSelect) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ins.Input}, nil +} + +// RouteType returns a description of the query routing type used by the primitive +func (ins *InsertSelect) RouteType() string { + return "InsertSelect" +} + +// TryExecute performs a non-streaming exec. 
+func (ins *InsertSelect) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) + defer cancelFunc() + + if ins.Keyspace.Sharded { + return ins.execInsertSharded(ctx, vcursor, bindVars) + } + return ins.execInsertUnsharded(ctx, vcursor, bindVars) +} + +// TryStreamExecute performs a streaming exec. +func (ins *InsertSelect) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + if ins.ForceNonStreaming { + res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(res) + } + ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) + defer cancelFunc() + + sharded := ins.Keyspace.Sharded + output := &sqltypes.Result{} + err := ins.execSelectStreaming(ctx, vcursor, bindVars, func(irr insertRowsResult) error { + if len(irr.rows) == 0 { + return nil + } + + var qr *sqltypes.Result + var err error + if sharded { + qr, err = ins.insertIntoShardedTable(ctx, vcursor, bindVars, irr) + } else { + qr, err = ins.insertIntoUnshardedTable(ctx, vcursor, bindVars, irr) + } + if err != nil { + return err + } + + output.RowsAffected += qr.RowsAffected + // InsertID needs to be updated to the least insertID value in sqltypes.Result + if output.InsertID == 0 || output.InsertID > qr.InsertID { + output.InsertID = qr.InsertID + } + return nil + }) + if err != nil { + return err + } + return callback(output) +} + +func (ins *InsertSelect) execInsertUnsharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + irr, err := ins.execSelect(ctx, vcursor, bindVars) + if err != nil { + return nil, err + } + if len(irr.rows) == 0 { + return &sqltypes.Result{}, nil + } + return ins.insertIntoUnshardedTable(ctx, vcursor, 
bindVars, irr) +} + +func (ins *InsertSelect) insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, irr insertRowsResult) (*sqltypes.Result, error) { + query := ins.getInsertUnshardedQuery(irr.rows, bindVars) + return ins.executeUnshardedTableQuery(ctx, vcursor, ins, bindVars, query, irr.insertID) +} + +func (ins *InsertSelect) getInsertUnshardedQuery(rows []sqltypes.Row, bindVars map[string]*querypb.BindVariable) string { + var mids sqlparser.Values + for r, inputRow := range rows { + row := sqlparser.ValTuple{} + for c, value := range inputRow { + bvName := insertVarOffset(r, c) + bindVars[bvName] = sqltypes.ValueBindVariable(value) + row = append(row, sqlparser.NewArgument(bvName)) + } + mids = append(mids, row) + } + return ins.Prefix + sqlparser.String(mids) + ins.Suffix +} + +func (ins *InsertSelect) insertIntoShardedTable( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + irr insertRowsResult, +) (*sqltypes.Result, error) { + rss, queries, err := ins.getInsertShardedQueries(ctx, vcursor, bindVars, irr.rows) + if err != nil { + return nil, err + } + + qr, err := ins.executeInsertQueries(ctx, vcursor, rss, queries, irr.insertID) + if err != nil { + return nil, err + } + qr.InsertID = uint64(irr.insertID) + return qr, nil +} + +func (ins *InsertSelect) executeInsertQueries( + ctx context.Context, + vcursor VCursor, + rss []*srvtopo.ResolvedShard, + queries []*querypb.BoundQuery, + insertID uint64, +) (*sqltypes.Result, error) { + autocommit := (len(rss) == 1 || ins.MultiShardAutocommit) && vcursor.AutocommitApproval() + err := allowOnlyPrimary(rss...) 
+ if err != nil { + return nil, err + } + result, errs := vcursor.ExecuteMultiShard(ctx, ins, rss, queries, true /* rollbackOnError */, autocommit) + if errs != nil { + return nil, vterrors.Aggregate(errs) + } + + if insertID != 0 { + result.InsertID = insertID + } + return result, nil +} + +func (ins *InsertSelect) getInsertShardedQueries( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + rows []sqltypes.Row, +) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { + vindexRowsValues, err := ins.buildVindexRowsValues(rows) + if err != nil { + return nil, nil, err + } + + keyspaceIDs, err := ins.processVindexes(ctx, vcursor, vindexRowsValues) + if err != nil { + return nil, nil, err + } + + var indexes []*querypb.Value + var destinations []key.Destination + for i, ksid := range keyspaceIDs { + if ksid != nil { + indexes = append(indexes, &querypb.Value{ + Value: strconv.AppendInt(nil, int64(i), 10), + }) + destinations = append(destinations, key.DestinationKeyspaceID(ksid)) + } + } + if len(destinations) == 0 { + // In this case, all we have is nil KeyspaceIds, we don't do + // anything at all. 
+ return nil, nil, nil + } + + rss, indexesPerRss, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, indexes, destinations) + if err != nil { + return nil, nil, err + } + + queries := make([]*querypb.BoundQuery, len(rss)) + for i := range rss { + bvs := sqltypes.CopyBindVariables(bindVars) // we don't want to create one huge bindvars for all values + var mids sqlparser.Values + for _, indexValue := range indexesPerRss[i] { + index, _ := strconv.Atoi(string(indexValue.Value)) + if keyspaceIDs[index] != nil { + row := sqlparser.ValTuple{} + for colOffset, value := range rows[index] { + bvName := insertVarOffset(index, colOffset) + bvs[bvName] = sqltypes.ValueBindVariable(value) + row = append(row, sqlparser.NewArgument(bvName)) + } + mids = append(mids, row) + } + } + rewritten := ins.Prefix + sqlparser.String(mids) + ins.Suffix + queries[i] = &querypb.BoundQuery{ + Sql: rewritten, + BindVariables: bvs, + } + } + + return rss, queries, nil +} + +func (ins *InsertSelect) buildVindexRowsValues(rows []sqltypes.Row) ([][]sqltypes.Row, error) { + colVindexes := ins.ColVindexes + if len(colVindexes) != len(ins.VindexValueOffset) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") + } + + // Here we go over the incoming rows and extract values for the vindexes we need to update + vindexRowsValues := make([][]sqltypes.Row, len(colVindexes)) + for _, inputRow := range rows { + for colIdx := range colVindexes { + offsets := ins.VindexValueOffset[colIdx] + row := make(sqltypes.Row, 0, len(offsets)) + for _, offset := range offsets { + if offset == -1 { // value not provided from select query + row = append(row, sqltypes.NULL) + continue + } + row = append(row, inputRow[offset]) + } + vindexRowsValues[colIdx] = append(vindexRowsValues[colIdx], row) + } + } + return vindexRowsValues, nil +} + +func (ins *InsertSelect) execInsertSharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) 
(*sqltypes.Result, error) { + result, err := ins.execSelect(ctx, vcursor, bindVars) + if err != nil { + return nil, err + } + if len(result.rows) == 0 { + return &sqltypes.Result{}, nil + } + + return ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) +} + +func (ins *InsertSelect) description() PrimitiveDescription { + other := ins.commonDesc() + other["TableName"] = ins.GetTableName() + + if len(ins.VindexValueOffset) > 0 { + valuesOffsets := map[string]string{} + for idx, ints := range ins.VindexValueOffset { + if len(ins.ColVindexes) <= idx { + panic("ins.ColVindexes and ins.VindexValueOffset do not line up") + } + vindex := ins.ColVindexes[idx] + marshal, _ := json.Marshal(ints) + valuesOffsets[vindex.Name] = string(marshal) + } + other["VindexOffsetFromSelect"] = valuesOffsets + } + + return PrimitiveDescription{ + OperatorType: "Insert", + Keyspace: ins.Keyspace, + Variant: "Select", + TargetTabletType: topodatapb.TabletType_PRIMARY, + Other: other, + } +} + +func (ic *InsertCommon) commonDesc() map[string]any { + other := map[string]any{ + "MultiShardAutocommit": ic.MultiShardAutocommit, + "QueryTimeout": ic.QueryTimeout, + "InsertIgnore": ic.Ignore, + "InputAsNonStreaming": ic.ForceNonStreaming, + "NoAutoCommit": ic.PreventAutoCommit, + } + + if ic.Generate != nil { + if ic.Generate.Values == nil { + other["AutoIncrement"] = fmt.Sprintf("%s:Offset(%d)", ic.Generate.Query, ic.Generate.Offset) + } else { + other["AutoIncrement"] = fmt.Sprintf("%s:Values::%s", ic.Generate.Query, sqlparser.String(ic.Generate.Values)) + } + } + return other +} + +func insertVarOffset(rowNum, colOffset int) string { + return fmt.Sprintf("_c%d_%d", rowNum, colOffset) +} + +type insertRowsResult struct { + rows []sqltypes.Row + insertID uint64 +} + +func (ins *InsertSelect) execSelect( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, +) (insertRowsResult, error) { + res, err := vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) + 
if err != nil || len(res.Rows) == 0 { + return insertRowsResult{}, err + } + + insertID, err := ins.processGenerateFromSelect(ctx, vcursor, ins, res.Rows) + if err != nil { + return insertRowsResult{}, err + } + + return insertRowsResult{ + rows: res.Rows, + insertID: uint64(insertID), + }, nil +} + +func (ins *InsertSelect) execSelectStreaming( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + callback func(irr insertRowsResult) error, +) error { + var mu sync.Mutex + return vcursor.StreamExecutePrimitiveStandalone(ctx, ins.Input, bindVars, false, func(result *sqltypes.Result) error { + if len(result.Rows) == 0 { + return nil + } + + // should process only one chunk at a time. + // as parallel chunk insert will try to use the same transaction in the vttablet + // this will cause transaction in use error out with "transaction in use" error. + mu.Lock() + defer mu.Unlock() + + insertID, err := ins.processGenerateFromSelect(ctx, vcursor, ins, result.Rows) + if err != nil { + return err + } + + return callback(insertRowsResult{ + rows: result.Rows, + insertID: uint64(insertID), + }) + }) +} diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index d08ef856275..4ee8431f083 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -33,7 +33,7 @@ import ( ) func TestInsertUnsharded(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -68,7 +68,7 @@ func TestInsertUnsharded(t *testing.T) { } func TestInsertUnshardedGenerate(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -121,7 +121,7 @@ func TestInsertUnshardedGenerate(t *testing.T) { } func TestInsertUnshardedGenerate_Zeros(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -198,7 +198,7 @@ func 
TestInsertShardedSimple(t *testing.T) { ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -232,7 +232,7 @@ func TestInsertShardedSimple(t *testing.T) { }) // Multiple rows are not autocommitted by default - ins = NewInsert( + ins = newInsert( InsertSharded, false, ks.Keyspace, @@ -272,7 +272,7 @@ func TestInsertShardedSimple(t *testing.T) { }) // Optional flag overrides autocommit - ins = NewInsert( + ins = newInsert( InsertSharded, false, ks.Keyspace, @@ -344,7 +344,7 @@ func TestInsertShardedFail(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -394,7 +394,7 @@ func TestInsertShardedGenerate(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -513,7 +513,7 @@ func TestInsertShardedOwned(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -623,7 +623,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -700,7 +700,7 @@ func TestInsertShardedGeo(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -806,7 +806,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, true, ks.Keyspace, @@ -962,7 +962,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := 
newInsert( InsertSharded, true, ks.Keyspace, @@ -1060,7 +1060,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1188,7 +1188,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, true, ks.Keyspace, @@ -1294,7 +1294,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1371,7 +1371,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1485,7 +1485,7 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1533,21 +1533,13 @@ func TestInsertSelectSimple(t *testing.T) { ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{{1}}, - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t1"].ColumnVindexes...) 
- ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect(false, ks.Keyspace, ks.Tables["t1"], "prefix ", " suffix", [][]int{{1}}, rb) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1623,23 +1615,24 @@ func TestInsertSelectOwned(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + " suffix", + [][]int{ {1}, // The primary vindex has a single column as sharding key {0}}, // the onecol vindex uses the 'name' column - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t1"].ColumnVindexes...) 
- ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb, + ) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1723,24 +1716,22 @@ func TestInsertSelectGenerate(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := NewInsert( - InsertSelect, - false, - ks.Keyspace, - nil, - ks.Tables["t1"], - "prefix ", - nil, - " suffix") - ins.Query = "dummy_insert" - ins.VindexValueOffset = [][]int{{1}} // The primary vindex has a single column as sharding key - ins.Input = &Route{ + rb := &Route{ Query: "dummy_select", FieldQuery: "dummy_field_query", RoutingParameters: &RoutingParameters{ Opcode: Scatter, Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + " suffix", + [][]int{{1}}, // The primary vindex has a single column as sharding key + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1760,7 +1751,7 @@ func TestInsertSelectGenerate(t *testing.T) { "varchar|int64"), "a|1", "a|null", - "b|null"), + "b|0"), // This is the result for the sequence query sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -1817,20 +1808,23 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + " suffix", + [][]int{ + {1}}, 
// The primary vindex has a single column as sharding key + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1839,8 +1833,6 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Query: "dummy_generate", Offset: 1, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1913,20 +1905,21 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + " suffix", + [][]int{{1}}, // The primary vindex has a single column as sharding key, + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1935,8 +1928,6 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Query: "dummy_generate", Offset: 2, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -2001,20 +1992,21 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - 
FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + " suffix", + [][]int{{1}}, // The primary vindex has a single column as sharding key, + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -2023,8 +2015,6 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Query: "dummy_generate", Offset: 2, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -2099,22 +2089,21 @@ func TestInsertSelectUnowned(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {0}}, // the onecol vindex as unowned lookup sharding column - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t2"].ColumnVindexes...) 
- ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t2"], + "prefix ", + " suffix", + [][]int{{0}}, // // the onecol vindex as unowned lookup sharding column + rb, + ) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -2220,16 +2209,15 @@ func TestInsertSelectShardingCases(t *testing.T) { RoutingParameters: &RoutingParameters{Opcode: Unsharded, Keyspace: uks2.Keyspace}} // sks1 and sks2 - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: sks1.Keyspace, - Query: "dummy_insert", - Prefix: "prefix ", - Suffix: " suffix", - ColVindexes: sks1.Tables["s1"].ColumnVindexes, - VindexValueOffset: [][]int{{0}}, - Input: sRoute, - } + ins := newInsertSelect( + false, + sks1.Keyspace, + sks1.Tables["s1"], + "prefix ", + " suffix", + [][]int{{0}}, + sRoute, + ) vc := &loggingVCursor{ resolvedTargetTabletType: topodatapb.TabletType_PRIMARY, @@ -2298,14 +2286,15 @@ func TestInsertSelectShardingCases(t *testing.T) { `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) // uks1 and sks2 - ins = &Insert{ - Opcode: InsertUnsharded, - Keyspace: uks1.Keyspace, - Query: "dummy_insert", - Prefix: "prefix ", - Suffix: " suffix", - Input: sRoute, - } + ins = newInsertSelect( + false, + uks1.Keyspace, + nil, + "prefix ", + " suffix", + nil, + sRoute, + ) vc.Rewind() _, err = ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index 39144fc858d..173c0213073 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" 
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -91,28 +90,30 @@ func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vin } func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { - eIns := &engine.Insert{} - eIns.Keyspace = ks - eIns.TableName = tables[0].Name.String() - eIns.Opcode = engine.InsertUnsharded + eIns := &engine.Insert{ + InsertCommon: engine.InsertCommon{ + Opcode: engine.InsertUnsharded, + Keyspace: ks, + TableName: tables[0].Name.String(), + }, + } eIns.Query = generateQuery(stmt) return &insert{eInsert: eIns} } type insert struct { - eInsert *engine.Insert - source logicalPlan + eInsert *engine.Insert + eInsertSelect *engine.InsertSelect + source logicalPlan } var _ logicalPlan = (*insert)(nil) func (i *insert) Primitive() engine.Primitive { - if i.source != nil { - i.eInsert.Input = i.source.Primitive() + if i.source == nil { + return i.eInsert } - return i.eInsert -} - -func (i *insert) ContainsTables() semantics.TableSet { - panic("does not expect insert to get contains tables call") + input := i.source.Primitive() + i.eInsertSelect.Input = input + return i.eInsertSelect } diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index d6732ada87d..5f965b55ad9 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -110,20 +110,20 @@ func transformInsertionSelection(ctx *plancontext.PlanningContext, op *operators } ins := dmlOp.(*operators.Insert) - eins := &engine.Insert{ - Opcode: mapToInsertOpCode(rb.Routing.OpCode(), true), - Keyspace: rb.Routing.Keyspace(), - TableName: ins.VTable.Name.String(), - Ignore: ins.Ignore, - ForceNonStreaming: op.ForceNonStreaming, - Generate: autoIncGenerate(ins.AutoIncrement), - ColVindexes: ins.ColVindexes, - 
VindexValues: ins.VindexValues, + eins := &engine.InsertSelect{ + InsertCommon: engine.InsertCommon{ + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), + Ignore: ins.Ignore, + ForceNonStreaming: op.ForceNonStreaming, + Generate: autoIncGenerate(ins.AutoIncrement), + ColVindexes: ins.ColVindexes, + }, VindexValueOffset: ins.VindexValueOffset, } - lp := &insert{eInsert: eins} + lp := &insert{eInsertSelect: eins} - eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) + eins.Prefix, _, eins.Suffix = generateInsertShardedQuery(ins.AST) selectionPlan, err := transformToLogicalPlan(ctx, op.Select) if err != nil { @@ -548,15 +548,23 @@ func buildInsertLogicalPlan( hints *queryHints, ) (logicalPlan, error) { ins := op.(*operators.Insert) + + ic := engine.InsertCommon{ + Opcode: mapToInsertOpCode(rb.Routing.OpCode()), + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), + Ignore: ins.Ignore, + Generate: autoIncGenerate(ins.AutoIncrement), + ColVindexes: ins.ColVindexes, + } + if hints != nil { + ic.MultiShardAutocommit = hints.multiShardAutocommit + ic.QueryTimeout = hints.queryTimeout + } + eins := &engine.Insert{ - Opcode: mapToInsertOpCode(rb.Routing.OpCode(), false), - Keyspace: rb.Routing.Keyspace(), - TableName: ins.VTable.Name.String(), - Ignore: ins.Ignore, - Generate: autoIncGenerate(ins.AutoIncrement), - ColVindexes: ins.ColVindexes, - VindexValues: ins.VindexValues, - VindexValueOffset: ins.VindexValueOffset, + InsertCommon: ic, + VindexValues: ins.VindexValues, } lp := &insert{eInsert: eins} @@ -566,22 +574,14 @@ func buildInsertLogicalPlan( eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) } - if hints != nil { - eins.MultiShardAutocommit = hints.multiShardAutocommit - eins.QueryTimeout = hints.queryTimeout - } - eins.Query = generateQuery(stmt) return lp, nil } -func mapToInsertOpCode(code engine.Opcode, insertSelect bool) engine.InsertOpcode { +func mapToInsertOpCode(code 
engine.Opcode) engine.InsertOpcode { if code == engine.Unsharded { return engine.InsertUnsharded } - if insertSelect { - return engine.InsertSelect - } return engine.InsertSharded } diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index 5ec8210b12d..ec39db0b158 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -4084,7 +4084,7 @@ "Original": "insert into unsharded(col) select col from unsharded_tab", "Instructions": { "OperatorType": "Insert", - "Variant": "Unsharded", + "Variant": "Select", "Keyspace": { "Name": "main", "Sharded": false @@ -4119,7 +4119,7 @@ "Original": "insert into unsharded(col) select col from t1", "Instructions": { "OperatorType": "Insert", - "Variant": "Unsharded", + "Variant": "Select", "Keyspace": { "Name": "main", "Sharded": false From 9850f012a65289a808851bdfc44c1d2ff7fd6713 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Thu, 30 Nov 2023 05:17:56 +0530 Subject: [PATCH 18/33] Add wait for reading mycnf to prevent race (#14626) Signed-off-by: Manan Gupta --- go/vt/mysqlctl/cmd.go | 2 +- go/vt/mysqlctl/mycnf.go | 19 +++++++++- go/vt/mysqlctl/mycnf_flag.go | 8 ++++- go/vt/mysqlctl/mycnf_test.go | 68 ++++++++++++++++++++++-------------- 4 files changed, 68 insertions(+), 29 deletions(-) diff --git a/go/vt/mysqlctl/cmd.go b/go/vt/mysqlctl/cmd.go index 5c3bda11437..222a39e26ee 100644 --- a/go/vt/mysqlctl/cmd.go +++ b/go/vt/mysqlctl/cmd.go @@ -55,7 +55,7 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( // of the MySQL instance. 
func OpenMysqldAndMycnf(tabletUID uint32) (*Mysqld, *Mycnf, error) { // We pass a port of 0, this will be read and overwritten from the path on disk - mycnf, err := ReadMycnf(NewMycnf(tabletUID, 0)) + mycnf, err := ReadMycnf(NewMycnf(tabletUID, 0), 0) if err != nil { return nil, nil, fmt.Errorf("couldn't read my.cnf file: %v", err) } diff --git a/go/vt/mysqlctl/mycnf.go b/go/vt/mysqlctl/mycnf.go index 3af6b8e8607..dad91e20fed 100644 --- a/go/vt/mysqlctl/mycnf.go +++ b/go/vt/mysqlctl/mycnf.go @@ -28,6 +28,7 @@ import ( "os" "path" "strconv" + "time" ) // Mycnf is a memory structure that contains a bunch of interesting @@ -112,6 +113,10 @@ type Mycnf struct { Path string // the actual path that represents this mycnf } +const ( + myCnfWaitRetryTime = 100 * time.Millisecond +) + // TabletDir returns the tablet directory. func (cnf *Mycnf) TabletDir() string { return path.Dir(cnf.DataDir) @@ -153,8 +158,20 @@ func normKey(bkey []byte) string { // ReadMycnf will read an existing my.cnf from disk, and update the passed in Mycnf object // with values from the my.cnf on disk. -func ReadMycnf(mycnf *Mycnf) (*Mycnf, error) { +func ReadMycnf(mycnf *Mycnf, waitTime time.Duration) (*Mycnf, error) { f, err := os.Open(mycnf.Path) + if waitTime != 0 { + timer := time.NewTimer(waitTime) + for err != nil { + select { + case <-timer.C: + return nil, err + default: + time.Sleep(myCnfWaitRetryTime) + f, err = os.Open(mycnf.Path) + } + } + } if err != nil { return nil, err } diff --git a/go/vt/mysqlctl/mycnf_flag.go b/go/vt/mysqlctl/mycnf_flag.go index 33a18d69940..8559e5c1431 100644 --- a/go/vt/mysqlctl/mycnf_flag.go +++ b/go/vt/mysqlctl/mycnf_flag.go @@ -17,6 +17,8 @@ limitations under the License. package mysqlctl import ( + "time" + "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" @@ -51,6 +53,10 @@ var ( flagMycnfFile string ) +const ( + waitForMyCnf = 10 * time.Second +) + // RegisterFlags registers the command line flags for // specifying the values of a mycnf config file. 
See NewMycnfFromFlags // to get the supported modes. @@ -129,5 +135,5 @@ func NewMycnfFromFlags(uid uint32) (mycnf *Mycnf, err error) { } mycnf = NewMycnf(uid, 0) mycnf.Path = flagMycnfFile - return ReadMycnf(mycnf) + return ReadMycnf(mycnf, waitForMyCnf) } diff --git a/go/vt/mysqlctl/mycnf_test.go b/go/vt/mysqlctl/mycnf_test.go index d422ed899c4..fc54f063618 100644 --- a/go/vt/mysqlctl/mycnf_test.go +++ b/go/vt/mysqlctl/mycnf_test.go @@ -20,7 +20,11 @@ import ( "bytes" "os" "strings" + "sync" "testing" + "time" + + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/servenv" @@ -29,6 +33,9 @@ import ( var MycnfPath = "/tmp/my.cnf" func TestMycnf(t *testing.T) { + // Remove any my.cnf file if it already exists. + os.Remove(MycnfPath) + uid := uint32(11111) cnf := NewMycnf(uid, 6802) myTemplateSource := new(bytes.Buffer) @@ -39,36 +46,45 @@ func TestMycnf(t *testing.T) { f, _ := os.ReadFile("../../../config/mycnf/default.cnf") myTemplateSource.Write(f) data, err := cnf.makeMycnf(myTemplateSource.String()) - if err != nil { - t.Errorf("err: %v", err) - } else { - t.Logf("data: %v", data) - } - err = os.WriteFile(MycnfPath, []byte(data), 0666) - if err != nil { - t.Errorf("failed creating my.cnf %v", err) - } - _, err = os.ReadFile(MycnfPath) - if err != nil { - t.Errorf("failed reading, err %v", err) - return - } + require.NoError(t, err) + t.Logf("data: %v", data) + + // Since there is no my.cnf file, reading it should fail with a no such file error. mycnf := NewMycnf(uid, 0) mycnf.Path = MycnfPath - mycnf, err = ReadMycnf(mycnf) - if err != nil { - t.Errorf("failed reading, err %v", err) - } else { + _, err = ReadMycnf(mycnf, 0) + require.ErrorContains(t, err, "no such file or directory") + + // Next up we will spawn a go-routine to try and read the cnf file with a timeout. 
+ // We will create the cnf file after some delay and verify that ReadMycnf does wait that long + // and ends up succeeding in reading the my.cnf file. + waitTime := 1 * time.Second + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + defer wg.Done() + startTime := time.Now() + var readErr error + mycnf, readErr = ReadMycnf(mycnf, 1*time.Minute) + require.NoError(t, readErr, "failed reading") t.Logf("socket file %v", mycnf.SocketFile) - } + totalTimeSpent := time.Since(startTime) + require.GreaterOrEqual(t, totalTimeSpent, waitTime) + }() + + time.Sleep(waitTime) + err = os.WriteFile(MycnfPath, []byte(data), 0666) + require.NoError(t, err, "failed creating my.cnf") + _, err = os.ReadFile(MycnfPath) + require.NoError(t, err, "failed reading") + + // Wait for ReadMycnf to finish and then verify that the data read is correct. + wg.Wait() // Tablet UID should be 11111, which determines tablet/data dir. - if got, want := mycnf.DataDir, "/vt_0000011111/"; !strings.Contains(got, want) { - t.Errorf("mycnf.DataDir = %v, want *%v*", got, want) - } + require.Contains(t, mycnf.DataDir, "/vt_0000011111/") // MySQL server-id should be 22222, different from Tablet UID. 
- if got, want := mycnf.ServerID, uint32(22222); got != want { - t.Errorf("mycnf.ServerID = %v, want %v", got, want) - } + require.EqualValues(t, uint32(22222), mycnf.ServerID) } // Run this test if any changes are made to hook handling / make_mycnf hook @@ -112,7 +128,7 @@ func NoTestMycnfHook(t *testing.T) { } mycnf := NewMycnf(uid, 0) mycnf.Path = cnf.Path - mycnf, err = ReadMycnf(mycnf) + mycnf, err = ReadMycnf(mycnf, 0) if err != nil { t.Errorf("failed reading, err %v", err) } else { From 43f754e07a5fc2b717c32ec300e1f48854501c1f Mon Sep 17 00:00:00 2001 From: jwangace <121262788+jwangace@users.noreply.github.com> Date: Wed, 29 Nov 2023 23:03:16 -0800 Subject: [PATCH 19/33] increase vtctlclient backupShard command success rate (#14604) Signed-off-by: Jun Wang Signed-off-by: jwangace <121262788+jwangace@users.noreply.github.com> Co-authored-by: Jun Wang Co-authored-by: Andrew Mason --- go/vt/vtctl/grpcvtctldserver/server.go | 8 +- go/vt/vtctl/reparentutil/util.go | 12 +++ go/vt/vtctl/reparentutil/util_test.go | 102 +++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 1 deletion(-) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 21c4dc272f6..caa99d5e8c8 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -431,8 +431,14 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v span.Annotate("incremental_from_pos", req.IncrementalFromPos) tablets, stats, err := reparentutil.ShardReplicationStatuses(ctx, s.ts, s.tmc, req.Keyspace, req.Shard) + + // Instead of return on err directly, only return err when no tablets for backup at all if err != nil { - return err + tablets = reparentutil.GetBackupCandidates(tablets, stats) + // Only return err when no usable tablet + if len(tablets) == 0 { + return err + } } var ( diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index cfde8f34508..ac57e1230d1 100644 --- 
a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -343,3 +343,15 @@ func waitForCatchUp( } return nil } + +// GetBackupCandidates is used to get a list of healthy tablets for backup +func GetBackupCandidates(tablets []*topo.TabletInfo, stats []*replicationdatapb.Status) (res []*topo.TabletInfo) { + for i, stat := range stats { + // shardTablets[i] and stats[i] is 1:1 mapping + // Always include TabletType_PRIMARY. Healthy shardTablets[i] will be added to tablets + if tablets[i].Type == topodatapb.TabletType_PRIMARY || stat != nil { + res = append(res, tablets[i]) + } + } + return res +} diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index a9e6274d490..7b0d624590f 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -1218,3 +1218,105 @@ func Test_getTabletsWithPromotionRules(t *testing.T) { }) } } + +func TestGetBackupCandidates(t *testing.T) { + var ( + primaryTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 1, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + } + replicaTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 2, + }, + Type: topodatapb.TabletType_REPLICA, + }, + } + rdonlyTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 3, + }, + Type: topodatapb.TabletType_RDONLY, + }, + } + spareTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 4, + }, + Type: topodatapb.TabletType_SPARE, + }, + } + ) + tests := []struct { + name string + in []*topo.TabletInfo + expected []*topo.TabletInfo + status []*replicationdatapb.Status + }{ + { + name: "one primary tablet with status", + in: []*topo.TabletInfo{primaryTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: 
[]*replicationdatapb.Status{{}}, + }, + { + name: "one primary tablet with no status", + in: []*topo.TabletInfo{primaryTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: []*replicationdatapb.Status{nil}, + }, + { + name: "4 tablets with no status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: []*replicationdatapb.Status{nil, nil, nil, nil}, + }, + { + name: "4 tablets with full status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, {}, {}, {}}, + }, + { + name: "4 tablets with no primaryTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{nil, {}, {}, {}}, + }, + { + name: "4 tablets with no replicaTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, nil, {}, {}}, + }, + { + name: "4 tablets with no rdonlyTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, {}, nil, {}}, + }, + { + name: "4 tablets with no spareTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet}, + status: []*replicationdatapb.Status{{}, {}, {}, nil}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res := GetBackupCandidates(tt.in, tt.status) + require.EqualValues(t, tt.expected, res) + 
}) + } +} From 3176543aefa8b09796cc5c7d8bcf79386a1fa486 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Taylor?= Date: Thu, 30 Nov 2023 09:54:38 +0100 Subject: [PATCH 20/33] bugfix: do not rewrite an expression twice (#14641) --- .../planbuilder/operators/aggregator.go | 1 - .../planbuilder/testdata/aggr_cases.json | 75 +++++++++++++++++++ 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index 685a418339a..269c4616274 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -186,7 +186,6 @@ func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlpa if offset >= 0 { return offset } - expr = a.DT.RewriteExpression(ctx, expr) // Aggregator is little special and cannot work if the input offset are not matched with the aggregation columns. // So, before pushing anything from above the aggregator offset planning needs to be completed. 
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index 3302ca09fc7..2254baa36a6 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -6297,5 +6297,80 @@ "user.user" ] } + }, + { + "comment": "Group by aggregated column should not be a problem", + "query": "SELECT b.col FROM music AS b JOIN (SELECT MIN(bb.id) AS min_id, MAX(bb.id) AS max_id FROM user AS bb) AS foobars WHERE b.id > foobars.min_id GROUP BY b.col", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT b.col FROM music AS b JOIN (SELECT MIN(bb.id) AS min_id, MAX(bb.id) AS max_id FROM user AS bb) AS foobars WHERE b.id > foobars.min_id GROUP BY b.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "(0|1)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|1) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "foobars_min_id": 0 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "0 COLLATE utf8mb4_0900_ai_ci", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "min(0|2) AS min_id, max(1|2) AS max_id", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(bb.id) as min_id, max(bb.id) as max_id, weight_string(bb.id) from `user` as bb where 1 != 1 group by weight_string(bb.id)", + "OrderBy": "0 ASC COLLATE utf8mb4_0900_ai_ci", + "Query": "select min(bb.id) as min_id, max(bb.id) as max_id, weight_string(bb.id) from `user` as bb group by weight_string(bb.id) order by min(bb.id) asc", + "Table": "`user`" + } + ] + } + ] + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": 
"user", + "Sharded": true + }, + "FieldQuery": "select b.col, weight_string(b.col) from music as b where 1 != 1 group by b.col, weight_string(b.col)", + "Query": "select b.col, weight_string(b.col) from music as b where b.id > :foobars_min_id group by b.col, weight_string(b.col)", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } } ] From e818ca560f4562951cd4343413281be1914c9790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Taylor?= Date: Thu, 30 Nov 2023 13:13:30 +0100 Subject: [PATCH 21/33] refactor: minor cleanups in planner code (#14642) --- go/vt/vtgate/planbuilder/operators/aggregator.go | 2 +- go/vt/vtgate/planbuilder/operators/phases.go | 15 ++++++++++----- go/vt/vtgate/planbuilder/simplifier_test.go | 11 +++-------- go/vt/vtgate/simplifier/expression_simplifier.go | 5 +++-- go/vt/vtgate/simplifier/simplifier.go | 2 +- go/vt/vtgate/simplifier/simplifier_test.go | 8 +++----- 6 files changed, 21 insertions(+), 22 deletions(-) diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index 269c4616274..e1848752e75 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -201,7 +201,7 @@ func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlpa } if addToGroupBy { - panic(vterrors.VT13001("did not expect to add group by here")) + panic(vterrors.VT13001(fmt.Sprintf("did not expect to add group by here: %s", sqlparser.String(expr)))) } return -1 diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go index f180cfc2ce0..557124e9320 100644 --- a/go/vt/vtgate/planbuilder/operators/phases.go +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -101,13 +101,18 @@ type phaser struct { } func (p *phaser) next(ctx *plancontext.PlanningContext) Phase { - for phas := p.current; phas < DONE; phas++ { - if 
phas.shouldRun(ctx.SemTable.QuerySignature) { - p.current = p.current + 1 - return phas + for { + curr := p.current + if curr == DONE { + return DONE + } + + p.current++ + + if curr.shouldRun(ctx.SemTable.QuerySignature) { + return curr } } - return DONE } func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 4c83af77933..56d310d2949 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -21,19 +21,14 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/vschemawrapper" - - "vitess.io/vitess/go/vt/vterrors" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/vtgate/simplifier" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/simplifier" ) // TestSimplifyBuggyQuery should be used to whenever we get a planner bug reported diff --git a/go/vt/vtgate/simplifier/expression_simplifier.go b/go/vt/vtgate/simplifier/expression_simplifier.go index 4537a137e76..5582ac3993d 100644 --- a/go/vt/vtgate/simplifier/expression_simplifier.go +++ b/go/vt/vtgate/simplifier/expression_simplifier.go @@ -21,7 +21,6 @@ import ( "strconv" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" ) @@ -44,10 +43,10 @@ func SimplifyExpr(in sqlparser.Expr, test CheckF) sqlparser.Expr { cursor.Replace(expr) valid := test(smallestKnown[0]) - log.Errorf("test: %t: simplified %s to %s, full expr: %s", valid, sqlparser.String(node), sqlparser.String(expr), sqlparser.String(smallestKnown)) if valid { break // we will still continue trying to simplify other expressions at this level } else { + log.Errorf("failed attempt: tried changing {%s} to {%s} in {%s}", sqlparser.String(node), 
sqlparser.String(expr), sqlparser.String(in)) // undo the change cursor.Replace(node) } @@ -105,6 +104,8 @@ func (s *shrinker) fillQueue() bool { s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.BinaryExpr: s.queue = append(s.queue, e.Left, e.Right) + case *sqlparser.BetweenExpr: + s.queue = append(s.queue, e.Left, e.From, e.To) case *sqlparser.Literal: switch e.Type { case sqlparser.StrVal: diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go index 0e19935caba..522da172557 100644 --- a/go/vt/vtgate/simplifier/simplifier.go +++ b/go/vt/vtgate/simplifier/simplifier.go @@ -201,7 +201,7 @@ func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, simplified := removeTable(clone, searchedTS, currentDB, si) name, _ := tbl.Name() if simplified && test(clone) { - log.Errorf("removed table %s: %s -> %s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) + log.Errorf("removed table `%s`: \n%s\n%s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) return clone } } diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index c9edbbab8d8..e2270a551b5 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -20,14 +20,12 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) func TestFindAllExpressions(t *testing.T) { From b28197bad47d020fb1c7f939c0cb07ed8e2acaed Mon Sep 17 00:00:00 2001 From: Rohit Nayak <57520317+rohit-nayak-ps@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:18:24 +0100 Subject: [PATCH 22/33] VReplication TableStreamer: Only stream tables in tablestreamer (ignore 
views) (#14646) Signed-off-by: Rohit Nayak --- go/test/endtoend/vreplication/fk_config_test.go | 1 + go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/go/test/endtoend/vreplication/fk_config_test.go b/go/test/endtoend/vreplication/fk_config_test.go index 5b02aeb62bb..deda1a1b296 100644 --- a/go/test/endtoend/vreplication/fk_config_test.go +++ b/go/test/endtoend/vreplication/fk_config_test.go @@ -20,6 +20,7 @@ var ( initialFKSchema = ` create table parent(id int, name varchar(128), primary key(id)) engine=innodb; create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; +create view vparent as select * from parent; ` initialFKData = ` insert into parent values(1, 'parent1'), (2, 'parent2'); diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go index 0bbd265435b..80f850dae2e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go @@ -23,12 +23,12 @@ import ( "strings" "sync/atomic" - "vitess.io/vitess/go/vt/vttablet" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -117,12 +117,16 @@ func (ts *tableStreamer) Stream() error { return err } - rs, err := conn.ExecuteFetch("show tables", -1, true) + rs, err := conn.ExecuteFetch("show full tables", -1, true) if err != nil { return err } for _, row := range rs.Rows { tableName := row[0].ToString() + tableType := row[1].ToString() + if tableType != tmutils.TableBaseTable { + continue + } if schema2.IsInternalOperationTableName(tableName) 
{ log.Infof("Skipping internal table %s", tableName) continue From 565523651ac5d02f997744ffe397a7e50e64850a Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Thu, 30 Nov 2023 18:03:00 +0100 Subject: [PATCH 23/33] Allow for passing in the MySQL shutdown timeout (#14568) Signed-off-by: Dirkjan Bussink --- go/cmd/mysqlctl/command/shutdown.go | 5 +- go/cmd/mysqlctl/command/teardown.go | 5 +- go/cmd/mysqlctld/cli/mysqlctld.go | 13 +- go/cmd/vtbackup/cli/vtbackup.go | 68 +++-- go/cmd/vtcombo/cli/main.go | 14 +- go/flags/endtoend/mysqlctld.txt | 3 +- go/flags/endtoend/vtbackup.txt | 1 + go/flags/endtoend/vtcombo.txt | 1 + go/flags/endtoend/vttablet.txt | 1 + go/test/endtoend/utils/mysql.go | 5 +- go/vt/mysqlctl/backup.go | 2 +- go/vt/mysqlctl/backup_blackbox_test.go | 90 +++--- go/vt/mysqlctl/backup_test.go | 54 ++-- go/vt/mysqlctl/backupengine.go | 64 ++-- go/vt/mysqlctl/builtinbackupengine.go | 4 +- go/vt/mysqlctl/fakemysqldaemon.go | 2 +- go/vt/mysqlctl/grpcmysqlctlserver/server.go | 13 +- go/vt/mysqlctl/mysql_daemon.go | 3 +- go/vt/mysqlctl/mysqld.go | 8 +- go/vt/mysqlctl/xtrabackupengine.go | 2 +- go/vt/proto/mysqlctl/mysqlctl.pb.go | 317 ++++++++++---------- go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go | 53 +++- go/vt/vttablet/tabletmanager/restore.go | 32 +- go/vt/vttablet/tabletmanager/rpc_backup.go | 29 +- go/vt/vttablet/tabletmanager/tm_init.go | 4 +- go/vt/wrangler/testlib/backup_test.go | 14 +- proto/mysqlctl.proto | 1 + web/vtadmin/src/proto/vtadmin.d.ts | 6 + web/vtadmin/src/proto/vtadmin.js | 31 +- 29 files changed, 502 insertions(+), 343 deletions(-) diff --git a/go/cmd/mysqlctl/command/shutdown.go b/go/cmd/mysqlctl/command/shutdown.go index 41c804856eb..6e2e3a74a61 100644 --- a/go/cmd/mysqlctl/command/shutdown.go +++ b/go/cmd/mysqlctl/command/shutdown.go @@ -30,7 +30,6 @@ var Shutdown = &cobra.Command{ Use: "shutdown", Short: "Shuts down mysqld, without removing any files.", Long: "Stop a `mysqld` instance that was previously started with `init` or 
`start`.\n\n" + - "For large `mysqld` instances, you may need to extend the `wait_time` to shutdown cleanly.", Example: `mysqlctl --tablet_uid 101 --alsologtostderr shutdown`, Args: cobra.NoArgs, @@ -51,9 +50,9 @@ func commandShutdown(cmd *cobra.Command, args []string) error { } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), shutdownArgs.WaitTime) + ctx, cancel := context.WithTimeout(context.Background(), shutdownArgs.WaitTime+10*time.Second) defer cancel() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, shutdownArgs.WaitTime); err != nil { return fmt.Errorf("failed shutdown mysql: %v", err) } return nil diff --git a/go/cmd/mysqlctl/command/teardown.go b/go/cmd/mysqlctl/command/teardown.go index 0d37a15cfdc..4ad0539bdd1 100644 --- a/go/cmd/mysqlctl/command/teardown.go +++ b/go/cmd/mysqlctl/command/teardown.go @@ -32,7 +32,6 @@ var Teardown = &cobra.Command{ Long: "{{< warning >}}\n" + "This is a destructive operation.\n" + "{{}}\n\n" + - "Shuts down a `mysqld` instance and removes its data directory.", Example: `mysqlctl --tablet_uid 101 --alsologtostderr teardown`, Args: cobra.NoArgs, @@ -54,9 +53,9 @@ func commandTeardown(cmd *cobra.Command, args []string) error { } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), teardownArgs.WaitTime) + ctx, cancel := context.WithTimeout(context.Background(), teardownArgs.WaitTime+10*time.Second) defer cancel() - if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force); err != nil { + if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force, teardownArgs.WaitTime); err != nil { return fmt.Errorf("failed teardown mysql (forced? 
%v): %v", teardownArgs.Force, err) } return nil diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go index 6ebaa5dc422..51a0c47f56e 100644 --- a/go/cmd/mysqlctld/cli/mysqlctld.go +++ b/go/cmd/mysqlctld/cli/mysqlctld.go @@ -45,8 +45,9 @@ var ( mysqlSocket string // mysqlctl init flags - waitTime = 5 * time.Minute - initDBSQLFile string + waitTime = 5 * time.Minute + shutdownWaitTime = 5 * time.Minute + initDBSQLFile string Main = &cobra.Command{ Use: "mysqlctld", @@ -84,8 +85,9 @@ func init() { Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port") Main.Flags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID") Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file") - Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown") + Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup") Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization") + Main.Flags().DurationVar(&shutdownWaitTime, "shutdown-wait-time", shutdownWaitTime, "How long to wait for mysqld shutdown") acl.RegisterFlags(Main.Flags()) } @@ -154,8 +156,9 @@ func run(cmd *cobra.Command, args []string) error { // Take mysqld down with us on SIGTERM before entering lame duck. 
servenv.OnTermSync(func() { log.Infof("mysqlctl received SIGTERM, shutting down mysqld first") - ctx := context.Background() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), shutdownWaitTime+10*time.Second) + defer cancel() + if err := mysqld.Shutdown(ctx, cnf, true, shutdownWaitTime); err != nil { log.Errorf("failed to shutdown mysqld: %v", err) } }) diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go index 121ba39b8c5..560e9d21b2a 100644 --- a/go/cmd/vtbackup/cli/vtbackup.go +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -84,13 +84,14 @@ var ( incrementalFromPos string // mysqlctld-like flags - mysqlPort = 3306 - mysqlSocket string - mysqlTimeout = 5 * time.Minute - initDBSQLFile string - detachedMode bool - keepAliveTimeout time.Duration - disableRedoLog bool + mysqlPort = 3306 + mysqlSocket string + mysqlTimeout = 5 * time.Minute + mysqlShutdownTimeout = 5 * time.Minute + initDBSQLFile string + detachedMode bool + keepAliveTimeout time.Duration + disableRedoLog bool // Deprecated, use "Phase" instead. 
deprecatedDurationByPhase = stats.NewGaugesWithSingleLabel( @@ -207,6 +208,7 @@ func init() { Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket") Main.Flags().DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup") + Main.Flags().DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "how long to wait for mysqld shutdown") Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db") Main.Flags().BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal") Main.Flags().DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.") @@ -340,9 +342,9 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back defer func() { // Be careful not to use the original context, because we don't want to // skip shutdown just because we timed out waiting for other things. 
- mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), mysqlShutdownTimeout+10*time.Second) defer mysqlShutdownCancel() - if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil { + if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false, mysqlShutdownTimeout); err != nil { log.Errorf("failed to shutdown mysqld: %v", err) } }() @@ -356,18 +358,19 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } backupParams := mysqlctl.BackupParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - IncrementalFromPos: incrementalFromPos, - HookExtraEnv: extraEnv, - TopoServer: topoServer, - Keyspace: initKeyspace, - Shard: initShard, - TabletAlias: topoproto.TabletAliasString(tabletAlias), - Stats: backupstats.BackupStats(), - UpgradeSafe: upgradeSafe, + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + IncrementalFromPos: incrementalFromPos, + HookExtraEnv: extraEnv, + TopoServer: topoServer, + Keyspace: initKeyspace, + Shard: initShard, + TabletAlias: topoproto.TabletAliasString(tabletAlias), + Stats: backupstats.BackupStats(), + UpgradeSafe: upgradeSafe, + MysqlShutdownTimeout: mysqlShutdownTimeout, } // In initial_backup mode, just take a backup of this empty database. 
if initialBackup { @@ -416,16 +419,17 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back log.Infof("Restoring latest backup from directory %v", backupDir) restoreAt := time.Now() params := mysqlctl.RestoreParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - HookExtraEnv: extraEnv, - DeleteBeforeRestore: true, - DbName: dbName, - Keyspace: initKeyspace, - Shard: initShard, - Stats: backupstats.RestoreStats(), + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + HookExtraEnv: extraEnv, + DeleteBeforeRestore: true, + DbName: dbName, + Keyspace: initKeyspace, + Shard: initShard, + Stats: backupstats.RestoreStats(), + MysqlShutdownTimeout: mysqlShutdownTimeout, } backupManifest, err := mysqlctl.Restore(ctx, params) var restorePos replication.Position @@ -583,7 +587,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back return fmt.Errorf("Could not prep for full shutdown: %v", err) } // Shutdown, waiting for it to finish - if err := mysqld.Shutdown(ctx, mycnf, true); err != nil { + if err := mysqld.Shutdown(ctx, mycnf, true, mysqlShutdownTimeout); err != nil { return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err) } // Start MySQL, waiting for it to come up diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go index bfc0ad894fe..6912a886b18 100644 --- a/go/cmd/vtcombo/cli/main.go +++ b/go/cmd/vtcombo/cli/main.go @@ -147,6 +147,8 @@ func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err return mysqld, cnf, nil } +const mysqlShutdownTimeout = 5 * time.Minute + func run(cmd *cobra.Command, args []string) (err error) { // Stash away a copy of the topology that vtcombo was started with. 
// @@ -195,7 +197,9 @@ func run(cmd *cobra.Command, args []string) (err error) { return err } servenv.OnClose(func() { - mysqld.Shutdown(context.TODO(), cnf, true) + ctx, cancel := context.WithTimeout(context.Background(), mysqlShutdownTimeout+10*time.Second) + defer cancel() + mysqld.Shutdown(ctx, cnf, true, mysqlShutdownTimeout) }) // We want to ensure we can write to this database mysqld.SetReadOnly(false) @@ -217,7 +221,9 @@ func run(cmd *cobra.Command, args []string) (err error) { if err != nil { // ensure we start mysql in the event we fail here if startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) + ctx, cancel := context.WithTimeout(context.Background(), mysqlShutdownTimeout+10*time.Second) + defer cancel() + mysqld.Shutdown(ctx, cnf, true, mysqlShutdownTimeout) } return fmt.Errorf("initTabletMapProto failed: %w", err) @@ -264,7 +270,9 @@ func run(cmd *cobra.Command, args []string) (err error) { err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) if err != nil { if startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) + ctx, cancel := context.WithTimeout(context.Background(), mysqlShutdownTimeout+10*time.Second) + defer cancel() + mysqld.Shutdown(ctx, cnf, true, mysqlShutdownTimeout) } return fmt.Errorf("Couldn't build srv keyspace for (%v: %v). Got error: %w", ks, tpb.Cells, err) diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index 141c6697070..ccb89f08bf5 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -110,6 +110,7 @@ Flags: --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. 
(default 10s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice + --shutdown-wait-time duration How long to wait for mysqld shutdown (default 5m0s) --socket_file string Local unix socket file to listen on --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class @@ -118,4 +119,4 @@ Flags: --v Level log level for V logs -v, --version print binary version --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging - --wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s) + --wait_time duration How long to wait for mysqld startup (default 5m0s) diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 8610606eca2..3971472b74b 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -174,6 +174,7 @@ Flags: --mycnf_slow_log_path string mysql slow query log path --mycnf_socket_file string mysql socket file --mycnf_tmp_dir string mysql tmp directory + --mysql-shutdown-timeout duration how long to wait for mysqld shutdown (default 5m0s) --mysql_port int mysql port (default 3306) --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --mysql_socket string path to the mysql socket diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index 40720e702b7..d32df437787 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -222,6 +222,7 @@ Flags: --mycnf_tmp_dir string mysql tmp directory --mysql-server-keepalive-period duration TCP period between keep-alives --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers + --mysql-shutdown-timeout duration timeout to use when MySQL is being shut down. (default 5m0s) --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections. --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static") --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP") diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index 7dc29a140b1..f3085f71d2f 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -242,6 +242,7 @@ Flags: --mycnf_slow_log_path string mysql slow query log path --mycnf_socket_file string mysql socket file --mycnf_tmp_dir string mysql tmp directory + --mysql-shutdown-timeout duration timeout to use when MySQL is being shut down. (default 5m0s) --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index de8ce40f992..888c0cc8959 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -23,6 +23,7 @@ import ( "os" "path" "testing" + "time" "github.com/stretchr/testify/assert" @@ -35,6 +36,8 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" ) +const mysqlShutdownTimeout = 1 * time.Minute + // NewMySQL creates a new MySQL server using the local mysqld binary. The name of the database // will be set to `dbName`. SQL queries that need to be executed on the new MySQL instance // can be passed through the `schemaSQL` argument. @@ -96,7 +99,7 @@ func NewMySQLWithMysqld(port int, hostname, dbName string, schemaSQL ...string) } return params, mysqld, func() { ctx := context.Background() - _ = mysqld.Teardown(ctx, mycnf, true) + _ = mysqld.Teardown(ctx, mycnf, true, mysqlShutdownTimeout) }, nil } diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index e9f0b19d54a..5a9706b07f8 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -453,7 +453,7 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // The MySQL manual recommends restarting mysqld after running mysql_upgrade, // so that any changes made to system tables take effect. 
params.Logger.Infof("Restore: restarting mysqld after mysql_upgrade") - if err := params.Mysqld.Shutdown(context.Background(), params.Cnf, true); err != nil { + if err := params.Mysqld.Shutdown(context.Background(), params.Cnf, true, params.MysqlShutdownTimeout); err != nil { return nil, err } if err := params.Mysqld.Start(context.Background(), params.Cnf); err != nil { diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/backup_blackbox_test.go index 8de6a8679fa..b174b60ed1d 100644 --- a/go/vt/mysqlctl/backup_blackbox_test.go +++ b/go/vt/mysqlctl/backup_blackbox_test.go @@ -47,6 +47,8 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" ) +const mysqlShutdownTimeout = 1 * time.Minute + func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { old := mysqlctl.BuiltinBackupMysqldTimeout mysqlctl.BuiltinBackupMysqldTimeout = t @@ -154,12 +156,13 @@ func TestExecuteBackup(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - Stats: fakeStats, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: fakeStats, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -213,10 +216,11 @@ func TestExecuteBackup(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) assert.Error(t, err) @@ -299,12 +303,13 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Concurrency: 2, - TopoServer: ts, - 
Keyspace: keyspace, - Shard: shard, - Stats: backupstats.NewFakeStats(), - UpgradeSafe: true, + Concurrency: 2, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: backupstats.NewFakeStats(), + UpgradeSafe: true, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -385,12 +390,13 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.Error(t, err) @@ -469,12 +475,13 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -500,19 +507,20 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), }, - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - DeleteBeforeRestore: false, - DbName: "test", - Keyspace: "test", - Shard: "-", - StartTime: time.Now(), - RestoreToPos: replication.Position{}, - RestoreToTimestamp: time.Time{}, - DryRun: false, - Stats: fakeStats, + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + 
Concurrency: 2, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } // Successful restore. diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go index 5b97f709c2f..ad7e0faab98 100644 --- a/go/vt/mysqlctl/backup_test.go +++ b/go/vt/mysqlctl/backup_test.go @@ -42,6 +42,8 @@ import ( "vitess.io/vitess/go/vt/mysqlctl/backupstorage" ) +const mysqlShutdownTimeout = 1 * time.Minute + // TestBackupExecutesBackupWithScopedParams tests that Backup passes // a Scope()-ed stats to backupengine ExecuteBackup. func TestBackupExecutesBackupWithScopedParams(t *testing.T) { @@ -563,7 +565,7 @@ func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { sqldb := fakesqldb.New(t) sqldb.SetNeverFail(true) mysqld := NewFakeMysqlDaemon(sqldb) - require.Nil(t, mysqld.Shutdown(ctx, nil, false)) + require.Nil(t, mysqld.Shutdown(ctx, nil, false, mysqlShutdownTimeout)) dirName, err := os.MkdirTemp("", "vt_backup_test") require.Nil(t, err) @@ -575,33 +577,35 @@ func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { stats := backupstats.NewFakeStats() backupParams := BackupParams{ - Cnf: cnf, - Logger: logger, - Mysqld: mysqld, - Concurrency: 1, - HookExtraEnv: map[string]string{}, - TopoServer: nil, - Keyspace: "test", - Shard: "-", - BackupTime: time.Now(), - IncrementalFromPos: "", - Stats: stats, + Cnf: cnf, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: nil, + Keyspace: "test", + Shard: "-", + BackupTime: time.Now(), + IncrementalFromPos: "", + Stats: stats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } restoreParams := RestoreParams{ - Cnf: cnf, - Logger: logger, - Mysqld: mysqld, - Concurrency: 1, - HookExtraEnv: map[string]string{}, - 
DeleteBeforeRestore: false, - DbName: "test", - Keyspace: "test", - Shard: "-", - StartTime: time.Now(), - RestoreToPos: replication.Position{}, - DryRun: false, - Stats: stats, + Cnf: cnf, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + DryRun: false, + Stats: stats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } manifest := BackupManifest{ diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index 5a79edbdde0..cf6065cd733 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -77,23 +77,26 @@ type BackupParams struct { Stats backupstats.Stats // UpgradeSafe indicates whether the backup is safe for upgrade and created with innodb_fast_shutdown=0 UpgradeSafe bool + // MysqlShutdownTimeout defines how long we wait during MySQL shutdown if that is part of the backup process. + MysqlShutdownTimeout time.Duration } func (b *BackupParams) Copy() BackupParams { return BackupParams{ - Cnf: b.Cnf, - Mysqld: b.Mysqld, - Logger: b.Logger, - Concurrency: b.Concurrency, - HookExtraEnv: b.HookExtraEnv, - TopoServer: b.TopoServer, - Keyspace: b.Keyspace, - Shard: b.Shard, - TabletAlias: b.TabletAlias, - BackupTime: b.BackupTime, - IncrementalFromPos: b.IncrementalFromPos, - Stats: b.Stats, - UpgradeSafe: b.UpgradeSafe, + Cnf: b.Cnf, + Mysqld: b.Mysqld, + Logger: b.Logger, + Concurrency: b.Concurrency, + HookExtraEnv: b.HookExtraEnv, + TopoServer: b.TopoServer, + Keyspace: b.Keyspace, + Shard: b.Shard, + TabletAlias: b.TabletAlias, + BackupTime: b.BackupTime, + IncrementalFromPos: b.IncrementalFromPos, + Stats: b.Stats, + UpgradeSafe: b.UpgradeSafe, + MysqlShutdownTimeout: b.MysqlShutdownTimeout, } } @@ -130,24 +133,27 @@ type RestoreParams struct { DryRun bool // Stats let's restore engines report detailed restore timings. 
Stats backupstats.Stats + // MysqlShutdownTimeout defines how long we wait during MySQL shutdown if that is part of the backup process. + MysqlShutdownTimeout time.Duration } func (p *RestoreParams) Copy() RestoreParams { return RestoreParams{ - Cnf: p.Cnf, - Mysqld: p.Mysqld, - Logger: p.Logger, - Concurrency: p.Concurrency, - HookExtraEnv: p.HookExtraEnv, - DeleteBeforeRestore: p.DeleteBeforeRestore, - DbName: p.DbName, - Keyspace: p.Keyspace, - Shard: p.Shard, - StartTime: p.StartTime, - RestoreToPos: p.RestoreToPos, - RestoreToTimestamp: p.RestoreToTimestamp, - DryRun: p.DryRun, - Stats: p.Stats, + Cnf: p.Cnf, + Mysqld: p.Mysqld, + Logger: p.Logger, + Concurrency: p.Concurrency, + HookExtraEnv: p.HookExtraEnv, + DeleteBeforeRestore: p.DeleteBeforeRestore, + DbName: p.DbName, + Keyspace: p.Keyspace, + Shard: p.Shard, + StartTime: p.StartTime, + RestoreToPos: p.RestoreToPos, + RestoreToTimestamp: p.RestoreToTimestamp, + DryRun: p.DryRun, + Stats: p.Stats, + MysqlShutdownTimeout: p.MysqlShutdownTimeout, } } @@ -584,10 +590,10 @@ func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe b return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) } -func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger) error { +func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, mysqlShutdownTimeout time.Duration) error { // shutdown mysqld if it is running logger.Infof("Restore: shutdown mysqld") - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, mysqlShutdownTimeout); err != nil { return err } diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index e46932bcd51..57f55cefb2f 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -468,7 +468,7 @@ func (be *BuiltinBackupEngine) 
executeFullBackup(ctx context.Context, params Bac // shutdown mysqld shutdownCtx, cancel := context.WithTimeout(ctx, BuiltinBackupMysqldTimeout) - err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true) + err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true, params.MysqlShutdownTimeout) defer cancel() if err != nil { return false, vterrors.Wrap(err, "can't shutdown mysqld") @@ -886,7 +886,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara // executeRestoreFullBackup restores the files from a full backup. The underlying mysql database service is expected to be stopped. func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { - if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { return err } diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index 791b43da583..521a839bf78 100644 --- a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -220,7 +220,7 @@ func (fmd *FakeMysqlDaemon) Start(ctx context.Context, cnf *Mycnf, mysqldArgs .. } // Shutdown is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error { +func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, mysqlShutdownTimeout time.Duration) error { if !fmd.Running { return fmt.Errorf("fake mysql daemon not running") } diff --git a/go/vt/mysqlctl/grpcmysqlctlserver/server.go b/go/vt/mysqlctl/grpcmysqlctlserver/server.go index ac3b5092f19..38ce4b2b2df 100644 --- a/go/vt/mysqlctl/grpcmysqlctlserver/server.go +++ b/go/vt/mysqlctl/grpcmysqlctlserver/server.go @@ -22,9 +22,11 @@ package grpcmysqlctlserver import ( "context" + "time" "google.golang.org/grpc" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/mysqlctl" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" ) @@ -41,9 +43,18 @@ func (s *server) Start(ctx context.Context, request *mysqlctlpb.StartRequest) (* return &mysqlctlpb.StartResponse{}, s.mysqld.Start(ctx, s.cnf, request.MysqldArgs...) } +const mysqlShutdownTimeout = 5 * time.Minute + // Shutdown implements the server side of the MysqlctlClient interface. func (s *server) Shutdown(ctx context.Context, request *mysqlctlpb.ShutdownRequest) (*mysqlctlpb.ShutdownResponse, error) { - return &mysqlctlpb.ShutdownResponse{}, s.mysqld.Shutdown(ctx, s.cnf, request.WaitForMysqld) + timeout, ok, err := protoutil.DurationFromProto(request.MysqlShutdownTimeout) + if err != nil { + return nil, err + } + if !ok { + timeout = mysqlShutdownTimeout + } + return &mysqlctlpb.ShutdownResponse{}, s.mysqld.Shutdown(ctx, s.cnf, request.WaitForMysqld, timeout) } // RunMysqlUpgrade implements the server side of the MysqlctlClient interface. 
diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index f50d368ed7d..66454e8b8a8 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -18,6 +18,7 @@ package mysqlctl import ( "context" + "time" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -33,7 +34,7 @@ import ( type MysqlDaemon interface { // methods related to mysql running or not Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error - Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error + Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, mysqlShutdownTimeout time.Duration) error RunMysqlUpgrade(ctx context.Context) error ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 52f806fe29b..9864d909b2f 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -522,7 +522,7 @@ func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnPa // flushed - on the order of 20-30 minutes. // // If a mysqlctld address is provided in a flag, Shutdown will run remotely. -func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error { +func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, shutdownTimeout time.Duration) error { log.Infof("Mysqld.Shutdown") // Execute as remote action on mysqlctld if requested. 
@@ -581,7 +581,7 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo defer os.Remove(cnf) args := []string{ "--defaults-extra-file=" + cnf, - "--shutdown-timeout=300", + fmt.Sprintf("--shutdown-timeout=%d", int(shutdownTimeout.Seconds())), "--connect-timeout=30", "--wait=10", "shutdown", @@ -1024,9 +1024,9 @@ func (mysqld *Mysqld) createTopDir(cnf *Mycnf, dir string) error { } // Teardown will shutdown the running daemon, and delete the root directory. -func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool) error { +func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool, shutdownTimeout time.Duration) error { log.Infof("mysqlctl.Teardown") - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, shutdownTimeout); err != nil { log.Warningf("failed mysqld shutdown: %v", err.Error()) if !force { return err diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index d11699167d9..a61551c3dcf 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -484,7 +484,7 @@ func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestorePa return nil, err } - if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { return nil, err } diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index 87d07442c8c..90c120243ec 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -190,7 +190,8 @@ type ShutdownRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - WaitForMysqld bool `protobuf:"varint,1,opt,name=wait_for_mysqld,json=waitForMysqld,proto3" json:"wait_for_mysqld,omitempty"` + WaitForMysqld bool 
`protobuf:"varint,1,opt,name=wait_for_mysqld,json=waitForMysqld,proto3" json:"wait_for_mysqld,omitempty"` + MysqlShutdownTimeout *vttime.Duration `protobuf:"bytes,2,opt,name=mysql_shutdown_timeout,json=mysqlShutdownTimeout,proto3" json:"mysql_shutdown_timeout,omitempty"` } func (x *ShutdownRequest) Reset() { @@ -232,6 +233,13 @@ func (x *ShutdownRequest) GetWaitForMysqld() bool { return false } +func (x *ShutdownRequest) GetMysqlShutdownTimeout() *vttime.Duration { + if x != nil { + return x.MysqlShutdownTimeout + } + return nil +} + type ShutdownResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -922,129 +930,134 @@ var file_mysqlctl_proto_rawDesc = []byte{ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x41, 0x72, 0x67, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0f, 0x53, 0x68, - 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, - 0x0f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x4d, - 0x79, 0x73, 0x71, 0x6c, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, - 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, - 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, - 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, - 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, - 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x0f, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, + 0x0a, 0x0f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, + 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x12, 0x46, 0x0a, 0x16, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, + 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6d, 
0x79, 0x73, 0x71, 0x6c, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x12, + 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, + 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, + 0x6c, 
0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, + 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, + 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, + 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 
0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, + 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 
0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, + 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, + 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x53, 0x68, 
0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, + 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, - 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 
0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, - 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, - 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, - 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, - 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, - 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, - 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, - 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, - 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, - 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, - 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 
0x75, 0x6e, 0x4d, 0x79, - 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, - 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, - 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, - 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, - 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, - 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, - 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, - 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, - 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, - 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1080,37 +1093,39 @@ var file_mysqlctl_proto_goTypes = []interface{}{ (*VersionStringRequest)(nil), // 15: mysqlctl.VersionStringRequest (*VersionStringResponse)(nil), // 16: mysqlctl.VersionStringResponse (*BackupInfo)(nil), // 17: mysqlctl.BackupInfo - (*vttime.Time)(nil), // 18: vttime.Time - (*topodata.TabletAlias)(nil), // 19: topodata.TabletAlias + (*vttime.Duration)(nil), // 18: vttime.Duration + (*vttime.Time)(nil), // 19: vttime.Time + (*topodata.TabletAlias)(nil), // 20: topodata.TabletAlias } var file_mysqlctl_proto_depIdxs = []int32{ - 18, // 0: 
mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time - 18, // 1: mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time - 18, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time - 19, // 3: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias - 18, // 4: mysqlctl.BackupInfo.time:type_name -> vttime.Time - 0, // 5: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status - 1, // 6: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest - 3, // 7: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest - 5, // 8: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest - 7, // 9: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest - 9, // 10: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest - 11, // 11: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest - 13, // 12: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest - 15, // 13: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest - 2, // 14: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse - 4, // 15: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse - 6, // 16: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse - 8, // 17: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse - 10, // 18: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse - 12, // 19: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse - 14, // 20: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse - 16, // 21: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse - 14, // [14:22] is the sub-list for method output_type - 6, // [6:14] is 
the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 18, // 0: mysqlctl.ShutdownRequest.mysql_shutdown_timeout:type_name -> vttime.Duration + 19, // 1: mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time + 19, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time + 19, // 3: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time + 20, // 4: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias + 19, // 5: mysqlctl.BackupInfo.time:type_name -> vttime.Time + 0, // 6: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status + 1, // 7: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest + 3, // 8: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest + 5, // 9: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest + 7, // 10: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest + 9, // 11: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest + 11, // 12: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest + 13, // 13: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest + 15, // 14: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest + 2, // 15: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse + 4, // 16: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse + 6, // 17: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse + 8, // 18: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse + 10, // 19: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse + 12, // 20: 
mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse + 14, // 21: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse + 16, // 22: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse + 15, // [15:23] is the sub-list for method output_type + 7, // [7:15] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_mysqlctl_proto_init() } diff --git a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go index bb2ec78e03a..fab1af2f471 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go @@ -63,7 +63,8 @@ func (m *ShutdownRequest) CloneVT() *ShutdownRequest { return (*ShutdownRequest)(nil) } r := &ShutdownRequest{ - WaitForMysqld: m.WaitForMysqld, + WaitForMysqld: m.WaitForMysqld, + MysqlShutdownTimeout: m.MysqlShutdownTimeout.CloneVT(), } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -430,6 +431,16 @@ func (m *ShutdownRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MysqlShutdownTimeout != nil { + size, err := m.MysqlShutdownTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } if m.WaitForMysqld { i-- if m.WaitForMysqld { @@ -1085,6 +1096,10 @@ func (m *ShutdownRequest) SizeVT() (n int) { if m.WaitForMysqld { n += 2 } + if m.MysqlShutdownTimeout != nil { + l = m.MysqlShutdownTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1487,6 +1502,42 @@ func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { } } m.WaitForMysqld = bool(v != 0) + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field MysqlShutdownTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MysqlShutdownTimeout == nil { + m.MysqlShutdownTimeout = &vttime.Duration{} + } + if err := m.MysqlShutdownTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index 032b7357d43..e606e71a267 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -131,7 +131,8 @@ func (tm *TabletManager) RestoreData( deleteBeforeRestore bool, backupTime time.Time, restoreToTimetamp time.Time, - restoreToPos string) error { + restoreToPos string, + mysqlShutdownTimeout time.Duration) error { if err := tm.lock(ctx); err != nil { return err } @@ -180,14 +181,14 @@ func (tm *TabletManager) RestoreData( RestoreToPos: restoreToPos, RestoreToTimestamp: protoutil.TimeToProto(restoreToTimetamp), } - err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req) + err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req, mysqlShutdownTimeout) if err != nil { return err } return nil } -func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, request *tabletmanagerdatapb.RestoreFromBackupRequest) error { +func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.Logger, 
waitForBackupInterval time.Duration, deleteBeforeRestore bool, request *tabletmanagerdatapb.RestoreFromBackupRequest, mysqlShutdownTimeout time.Duration) error { tablet := tm.Tablet() originalType := tablet.Type @@ -217,18 +218,19 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L } params := mysqlctl.RestoreParams{ - Cnf: tm.Cnf, - Mysqld: tm.MysqlDaemon, - Logger: logger, - Concurrency: restoreConcurrency, - HookExtraEnv: tm.hookExtraEnv(), - DeleteBeforeRestore: deleteBeforeRestore, - DbName: topoproto.TabletDbName(tablet), - Keyspace: keyspace, - Shard: tablet.Shard, - StartTime: startTime, - DryRun: request.DryRun, - Stats: backupstats.RestoreStats(), + Cnf: tm.Cnf, + Mysqld: tm.MysqlDaemon, + Logger: logger, + Concurrency: restoreConcurrency, + HookExtraEnv: tm.hookExtraEnv(), + DeleteBeforeRestore: deleteBeforeRestore, + DbName: topoproto.TabletDbName(tablet), + Keyspace: keyspace, + Shard: tablet.Shard, + StartTime: startTime, + DryRun: request.DryRun, + Stats: backupstats.RestoreStats(), + MysqlShutdownTimeout: mysqlShutdownTimeout, } restoreToTimestamp := protoutil.TimeFromProto(request.RestoreToTimestamp).UTC() if request.RestoreToPos != "" && !restoreToTimestamp.IsZero() { diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index b3d2e2794f6..9c361eac400 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -149,19 +149,20 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req // Now we can run the backup. 
backupParams := mysqlctl.BackupParams{ - Cnf: tm.Cnf, - Mysqld: tm.MysqlDaemon, - Logger: l, - Concurrency: int(req.Concurrency), - IncrementalFromPos: req.IncrementalFromPos, - HookExtraEnv: tm.hookExtraEnv(), - TopoServer: tm.TopoServer, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - TabletAlias: topoproto.TabletAliasString(tablet.Alias), - BackupTime: time.Now(), - Stats: backupstats.BackupStats(), - UpgradeSafe: req.UpgradeSafe, + Cnf: tm.Cnf, + Mysqld: tm.MysqlDaemon, + Logger: l, + Concurrency: int(req.Concurrency), + IncrementalFromPos: req.IncrementalFromPos, + HookExtraEnv: tm.hookExtraEnv(), + TopoServer: tm.TopoServer, + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + TabletAlias: topoproto.TabletAliasString(tablet.Alias), + BackupTime: time.Now(), + Stats: backupstats.BackupStats(), + UpgradeSafe: req.UpgradeSafe, + MysqlShutdownTimeout: mysqlShutdownTimeout, } returnErr := mysqlctl.Backup(ctx, backupParams) @@ -189,7 +190,7 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) // Now we can run restore. - err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, request) + err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, request, mysqlShutdownTimeout) // Re-run health check to be sure to capture any replication delay. 
tm.QueryServiceControl.BroadcastHealth() diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 2873c3a1d5e..c1b6c837d50 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -88,6 +88,7 @@ var ( initPopulateMetadata bool initTimeout = 1 * time.Minute + mysqlShutdownTimeout = 5 * time.Minute ) func registerInitFlags(fs *pflag.FlagSet) { @@ -99,6 +100,7 @@ func registerInitFlags(fs *pflag.FlagSet) { fs.StringVar(&skipBuildInfoTags, "vttablet_skip_buildinfo_tags", skipBuildInfoTags, "comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'.") fs.Var(&initTags, "init_tags", "(init parameter) comma separated list of key:value pairs used to tag the tablet") fs.DurationVar(&initTimeout, "init_timeout", initTimeout, "(init parameter) timeout to use for the init phase.") + fs.DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "timeout to use when MySQL is being shut down.") } var ( @@ -798,7 +800,7 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { } // restoreFromBackup will just be a regular action // (same as if it was triggered remotely) - if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos); err != nil { + if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos, mysqlShutdownTimeout); err != nil { log.Exitf("RestoreFromBackup failed: %v", err) } }() diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 787e4ce1946..0ba8adc9a06 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -47,6 +47,8 @@ import ( topodatapb 
"vitess.io/vitess/go/vt/proto/topodata" ) +const mysqlShutdownTimeout = 1 * time.Minute + type compressionDetails struct { CompressionEngineName string ExternalCompressorCmd string @@ -263,7 +265,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { RelayLogInfoPath: path.Join(root, "relay-log.info"), } - err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, time.Time{} /* restoreToTimestamp */, "") + err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout) if err != nil { return err } @@ -302,7 +304,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.CurrentPrimaryPosition // restore primary from latest backup - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, ""), + require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout), "RestoreData failed") // tablet was created as PRIMARY, so it's baseTabletType is PRIMARY assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) @@ -318,7 +320,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } // Test restore with the backup timestamp - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, time.Time{} /* restoreToTimestamp */, ""), + 
require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout), "RestoreData with backup timestamp failed") assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) assert.False(t, primary.FakeMysqlDaemon.Replicating) @@ -519,7 +521,7 @@ func TestBackupRestoreLagged(t *testing.T) { errCh = make(chan error, 1) go func(ctx context.Context, tablet *FakeTablet) { - errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "") + errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout) }(ctx, destTablet) timer = time.NewTicker(1 * time.Second) @@ -713,7 +715,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { // set a short timeout so that we don't have to wait 30 seconds topo.RemoteOperationTimeout = 2 * time.Second // Restore should still succeed - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout)) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.True(t, destTablet.FakeMysqlDaemon.Replicating) @@ -867,7 +869,7 @@ func TestDisableActiveReparents(t *testing.T) { 
RelayLogInfoPath: path.Join(root, "relay-log.info"), } - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout)) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.False(t, destTablet.FakeMysqlDaemon.Replicating) diff --git a/proto/mysqlctl.proto b/proto/mysqlctl.proto index bc67cef07c1..7e5fe13b991 100644 --- a/proto/mysqlctl.proto +++ b/proto/mysqlctl.proto @@ -33,6 +33,7 @@ message StartResponse{} message ShutdownRequest{ bool wait_for_mysqld = 1; + vttime.Duration mysql_shutdown_timeout = 2; } message ShutdownResponse{} diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index f5f7c100cc5..11489a0acc2 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -12649,6 +12649,9 @@ export namespace mysqlctl { /** ShutdownRequest wait_for_mysqld */ wait_for_mysqld?: (boolean|null); + + /** ShutdownRequest mysql_shutdown_timeout */ + mysql_shutdown_timeout?: (vttime.IDuration|null); } /** Represents a ShutdownRequest. */ @@ -12663,6 +12666,9 @@ export namespace mysqlctl { /** ShutdownRequest wait_for_mysqld. */ public wait_for_mysqld: boolean; + /** ShutdownRequest mysql_shutdown_timeout. */ + public mysql_shutdown_timeout?: (vttime.IDuration|null); + /** * Creates a new ShutdownRequest instance using the specified properties. 
* @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index a7fcd29e3f7..71d67a61ed1 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -29336,6 +29336,7 @@ export const mysqlctl = $root.mysqlctl = (() => { * @memberof mysqlctl * @interface IShutdownRequest * @property {boolean|null} [wait_for_mysqld] ShutdownRequest wait_for_mysqld + * @property {vttime.IDuration|null} [mysql_shutdown_timeout] ShutdownRequest mysql_shutdown_timeout */ /** @@ -29361,6 +29362,14 @@ export const mysqlctl = $root.mysqlctl = (() => { */ ShutdownRequest.prototype.wait_for_mysqld = false; + /** + * ShutdownRequest mysql_shutdown_timeout. + * @member {vttime.IDuration|null|undefined} mysql_shutdown_timeout + * @memberof mysqlctl.ShutdownRequest + * @instance + */ + ShutdownRequest.prototype.mysql_shutdown_timeout = null; + /** * Creates a new ShutdownRequest instance using the specified properties. * @function create @@ -29387,6 +29396,8 @@ export const mysqlctl = $root.mysqlctl = (() => { writer = $Writer.create(); if (message.wait_for_mysqld != null && Object.hasOwnProperty.call(message, "wait_for_mysqld")) writer.uint32(/* id 1, wireType 0 =*/8).bool(message.wait_for_mysqld); + if (message.mysql_shutdown_timeout != null && Object.hasOwnProperty.call(message, "mysql_shutdown_timeout")) + $root.vttime.Duration.encode(message.mysql_shutdown_timeout, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; @@ -29425,6 +29436,10 @@ export const mysqlctl = $root.mysqlctl = (() => { message.wait_for_mysqld = reader.bool(); break; } + case 2: { + message.mysql_shutdown_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -29463,6 +29478,11 @@ export const mysqlctl = $root.mysqlctl = (() => { if (message.wait_for_mysqld != null && message.hasOwnProperty("wait_for_mysqld")) if (typeof 
message.wait_for_mysqld !== "boolean") return "wait_for_mysqld: boolean expected"; + if (message.mysql_shutdown_timeout != null && message.hasOwnProperty("mysql_shutdown_timeout")) { + let error = $root.vttime.Duration.verify(message.mysql_shutdown_timeout); + if (error) + return "mysql_shutdown_timeout." + error; + } return null; }; @@ -29480,6 +29500,11 @@ export const mysqlctl = $root.mysqlctl = (() => { let message = new $root.mysqlctl.ShutdownRequest(); if (object.wait_for_mysqld != null) message.wait_for_mysqld = Boolean(object.wait_for_mysqld); + if (object.mysql_shutdown_timeout != null) { + if (typeof object.mysql_shutdown_timeout !== "object") + throw TypeError(".mysqlctl.ShutdownRequest.mysql_shutdown_timeout: object expected"); + message.mysql_shutdown_timeout = $root.vttime.Duration.fromObject(object.mysql_shutdown_timeout); + } return message; }; @@ -29496,10 +29521,14 @@ export const mysqlctl = $root.mysqlctl = (() => { if (!options) options = {}; let object = {}; - if (options.defaults) + if (options.defaults) { object.wait_for_mysqld = false; + object.mysql_shutdown_timeout = null; + } if (message.wait_for_mysqld != null && message.hasOwnProperty("wait_for_mysqld")) object.wait_for_mysqld = message.wait_for_mysqld; + if (message.mysql_shutdown_timeout != null && message.hasOwnProperty("mysql_shutdown_timeout")) + object.mysql_shutdown_timeout = $root.vttime.Duration.toObject(message.mysql_shutdown_timeout, options); return object; }; From fabc23c444a4f52a3662198686369033a0270a9f Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Thu, 30 Nov 2023 18:31:02 +0100 Subject: [PATCH 24/33] mysqlctl: Error out on stale socket (#14650) Signed-off-by: Dirkjan Bussink --- go/vt/mysqlctl/mysqld.go | 6 +++--- go/vt/mysqlctl/mysqld_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 9864d909b2f..731e608a2c0 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ 
-461,13 +461,13 @@ func cleanupLockfile(socket string, ts string) error { if err == nil { // If the process still exists, it's not safe to // remove the lock file, so we have to keep it around. - log.Warningf("%v: not removing socket lock file: %v", ts, lockPath) - return nil + log.Errorf("%v: not removing socket lock file: %v with pid %v", ts, lockPath, p) + return fmt.Errorf("process %v is still running", p) } if !errors.Is(err, os.ErrProcessDone) { // Any errors except for the process being done // is unexpected here. - log.Errorf("%v: error checking process: %v", ts, err) + log.Errorf("%v: error checking process %v: %v", ts, p, err) return err } diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go index b45ab5bd6d8..0caba8c904d 100644 --- a/go/vt/mysqlctl/mysqld_test.go +++ b/go/vt/mysqlctl/mysqld_test.go @@ -195,6 +195,6 @@ func TestCleanupLockfile(t *testing.T) { // If the lockfile exists, and the process is found, we don't clean it up. os.WriteFile("mysql.sock.lock", []byte(strconv.Itoa(os.Getpid())), 0o600) - assert.NoError(t, cleanupLockfile("mysql.sock", ts)) + assert.Error(t, cleanupLockfile("mysql.sock", ts)) assert.FileExists(t, "mysql.sock.lock") } From cd9693c40a7965e7aebaf8c749e9742c6dca0729 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 30 Nov 2023 12:46:46 -0500 Subject: [PATCH 25/33] Flakes: Shutdown vttablet before mysqld in backup tests (#14647) Signed-off-by: Matt Lord --- .../endtoend/backup/vtbackup/backup_only_test.go | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 5e80d5d3cc3..3cce3053406 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -300,11 +300,12 @@ func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool) extraArgs := []string{"--db-credentials-file", 
dbCredentialFile} tablet.MysqlctlProcess.ExtraArgs = extraArgs - // Shutdown Mysql - err := tablet.MysqlctlProcess.Stop() - require.Nil(t, err) // Teardown Tablet - err = tablet.VttabletProcess.TearDown() + err := tablet.VttabletProcess.TearDown() + require.Nil(t, err) + + // Shutdown Mysql + err = tablet.MysqlctlProcess.Stop() require.Nil(t, err) // Clear out the previous data @@ -335,13 +336,7 @@ func tearDown(t *testing.T, initMysql bool) { require.Nil(t, err) } - // TODO: Ideally we should not be resetting the mysql. - // So in below code we will have to uncomment the commented code and remove resetTabletDirectory for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { - //Tear down Tablet - //err := tablet.VttabletProcess.TearDown() - //require.Nil(t, err) - resetTabletDirectory(t, tablet, initMysql) // DeleteTablet on a primary will cause tablet to shutdown, so should only call it after tablet is already shut down err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) From 27f000923d179eda43f9cf99bd6585880b032160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Taylor?= Date: Mon, 4 Dec 2023 11:34:01 +0100 Subject: [PATCH 26/33] planbuilder: clean up code (#14657) Signed-off-by: Andres Taylor --- .../planbuilder/operator_transformers.go | 33 +- .../planbuilder/operators/SQL_builder.go | 9 +- .../operators/aggregation_pushing.go | 168 ++++----- .../planbuilder/operators/aggregator.go | 29 +- .../planbuilder/operators/apply_join.go | 54 ++- .../vtgate/planbuilder/operators/ast_to_op.go | 151 ++++---- .../vtgate/planbuilder/operators/comments.go | 15 +- go/vt/vtgate/planbuilder/operators/delete.go | 81 ++--- .../vtgate/planbuilder/operators/distinct.go | 17 +- .../planbuilder/operators/dml_planning.go | 34 +- .../planbuilder/operators/expressions.go | 5 +- go/vt/vtgate/planbuilder/operators/filter.go | 28 +- .../planbuilder/operators/fk_cascade.go | 19 +- 
.../vtgate/planbuilder/operators/fk_verify.go | 17 +- .../vtgate/planbuilder/operators/hash_join.go | 39 +- go/vt/vtgate/planbuilder/operators/helpers.go | 54 +-- go/vt/vtgate/planbuilder/operators/horizon.go | 32 +- .../operators/horizon_expanding.go | 68 ++-- .../operators/info_schema_planning.go | 15 +- go/vt/vtgate/planbuilder/operators/insert.go | 155 ++++---- .../planbuilder/operators/insert_selection.go | 17 +- go/vt/vtgate/planbuilder/operators/join.go | 51 ++- .../planbuilder/operators/join_merging.go | 5 +- go/vt/vtgate/planbuilder/operators/joins.go | 15 +- go/vt/vtgate/planbuilder/operators/limit.go | 15 +- .../planbuilder/operators/misc_routing.go | 36 +- .../planbuilder/operators/offset_planning.go | 46 ++- .../vtgate/planbuilder/operators/operator.go | 155 ++------ .../planbuilder/operators/operator_funcs.go | 3 +- go/vt/vtgate/planbuilder/operators/ops/op.go | 77 ---- .../vtgate/planbuilder/operators/ordering.go | 21 +- go/vt/vtgate/planbuilder/operators/phases.go | 68 ++-- .../planbuilder/operators/plan_query.go | 165 +++++++++ .../planbuilder/operators/projection.go | 102 +++--- .../planbuilder/operators/query_planning.go | 333 ++++++++---------- .../planbuilder/operators/querygraph.go | 9 +- .../planbuilder/operators/queryprojection.go | 53 +-- .../operators/queryprojection_test.go | 9 +- .../operators/{rewrite => }/rewriters.go | 138 +++----- go/vt/vtgate/planbuilder/operators/route.go | 106 ++---- .../planbuilder/operators/route_planning.go | 186 ++++------ .../planbuilder/operators/sequential.go | 11 +- .../planbuilder/operators/sharded_routing.go | 57 ++- .../vtgate/planbuilder/operators/subquery.go | 59 ++-- .../planbuilder/operators/subquery_builder.go | 119 ++----- .../operators/subquery_container.go | 17 +- .../operators/subquery_planning.go | 155 ++++---- go/vt/vtgate/planbuilder/operators/table.go | 7 +- .../operators/{ops => }/to_json.go | 2 +- go/vt/vtgate/planbuilder/operators/union.go | 15 +- 
.../planbuilder/operators/union_merging.go | 56 ++- go/vt/vtgate/planbuilder/operators/update.go | 131 +++---- .../planbuilder/operators/utils_test.go | 15 +- go/vt/vtgate/planbuilder/operators/vindex.go | 7 +- go/vt/vtgate/planbuilder/plan_test.go | 6 +- go/vt/vtgate/planbuilder/select.go | 3 +- 56 files changed, 1345 insertions(+), 1948 deletions(-) delete mode 100644 go/vt/vtgate/planbuilder/operators/ops/op.go create mode 100644 go/vt/vtgate/planbuilder/operators/plan_query.go rename go/vt/vtgate/planbuilder/operators/{rewrite => }/rewriters.go (67%) rename go/vt/vtgate/planbuilder/operators/{ops => }/to_json.go (98%) diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index 5f965b55ad9..3974a307e71 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -30,13 +30,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) (logicalPlan, error) { +func transformToLogicalPlan(ctx *plancontext.PlanningContext, op operators.Operator) (logicalPlan, error) { switch op := op.(type) { case *operators.Route: return transformRoutePlan(ctx, op) @@ -190,10 +188,7 @@ func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) return newUncorrelatedSubquery(op.FilterType, op.SubqueryValueName, op.HasValuesName, inner, outer), nil } - lhsCols, err := op.OuterExpressionsNeeded(ctx, op.Outer) - if err != nil { - return nil, err - } + lhsCols := op.OuterExpressionsNeeded(ctx, op.Outer) return newSemiJoin(outer, inner, op.Vars, lhsCols), 
nil } @@ -251,7 +246,7 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ KeyCol: groupBy.ColOffset, WeightStringCol: groupBy.WSOffset, - Expr: groupBy.AsAliasedExpr().Expr, + Expr: groupBy.SimplifiedExpr, Type: typ, }) } @@ -435,7 +430,7 @@ func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route, h } rp := newRoutingParams(ctx, op.Routing.OpCode()) - err = op.Routing.UpdateRoutingParams(ctx, rp) + op.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } @@ -544,7 +539,7 @@ func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route } func buildInsertLogicalPlan( - rb *operators.Route, op ops.Operator, stmt *sqlparser.Insert, + rb *operators.Route, op operators.Operator, stmt *sqlparser.Insert, hints *queryHints, ) (logicalPlan, error) { ins := op.(*operators.Insert) @@ -635,16 +630,13 @@ func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { func buildUpdateLogicalPlan( ctx *plancontext.PlanningContext, rb *operators.Route, - dmlOp ops.Operator, + dmlOp operators.Operator, stmt *sqlparser.Update, hints *queryHints, ) (logicalPlan, error) { upd := dmlOp.(*operators.Update) rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + rb.Routing.UpdateRoutingParams(ctx, rp) edml := &engine.DML{ Query: generateQuery(stmt), TableNames: []string{upd.VTable.Name.String()}, @@ -670,15 +662,12 @@ func buildUpdateLogicalPlan( func buildDeleteLogicalPlan( ctx *plancontext.PlanningContext, rb *operators.Route, - dmlOp ops.Operator, + dmlOp operators.Operator, hints *queryHints, ) (logicalPlan, error) { del := dmlOp.(*operators.Delete) rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + rb.Routing.UpdateRoutingParams(ctx, rp) edml := 
&engine.DML{ Query: generateQuery(del.AST), TableNames: []string{del.VTable.Name.String()}, @@ -739,7 +728,7 @@ func updateSelectedVindexPredicate(op *operators.Route) sqlparser.Expr { func getAllTableNames(op *operators.Route) ([]string, error) { tableNameMap := map[string]any{} - err := rewrite.Visit(op, func(op ops.Operator) error { + err := operators.Visit(op, func(op operators.Operator) error { tbl, isTbl := op.(*operators.Table) var name string if isTbl { @@ -764,7 +753,7 @@ func getAllTableNames(op *operators.Route) ([]string, error) { } func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (logicalPlan, error) { - sources, err := slice.MapWithError(op.Sources, func(src ops.Operator) (logicalPlan, error) { + sources, err := slice.MapWithError(op.Sources, func(src operators.Operator) (logicalPlan, error) { plan, err := transformToLogicalPlan(ctx, src) if err != nil { return nil, err diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 5201818951d..961a7d252ff 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -23,7 +23,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -33,7 +32,7 @@ type ( ctx *plancontext.PlanningContext stmt sqlparser.Statement tableNames []string - dmlOperator ops.Operator + dmlOperator Operator } ) @@ -41,7 +40,7 @@ func (qb *queryBuilder) asSelectStatement() sqlparser.SelectStatement { return qb.stmt.(sqlparser.SelectStatement) } -func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (_ sqlparser.Statement, _ ops.Operator, err error) { +func ToSQL(ctx *plancontext.PlanningContext, op Operator) (_ sqlparser.Statement, _ Operator, err error) { defer PanicHandler(&err) q := 
&queryBuilder{ctx: ctx} @@ -347,7 +346,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) { } // buildQuery recursively builds the query into an AST, from an operator tree -func buildQuery(op ops.Operator, qb *queryBuilder) { +func buildQuery(op Operator, qb *queryBuilder) { switch op := op.(type) { case *Table: buildTable(op, qb) @@ -415,7 +414,7 @@ func buildUpdate(op *Update, qb *queryBuilder) { } type OpWithAST interface { - ops.Operator + Operator Statement() sqlparser.Statement } diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go index edba5c51256..e50483ce8d2 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go @@ -24,25 +24,23 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { +func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output Operator, applyResult *ApplyResult) { if aggregator.Pushed { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } // this rewrite is always valid, and we should do it whenever possible if route, ok := aggregator.Source.(*Route); ok && (route.IsSingleShard() || overlappingUniqueVindex(ctx, aggregator.Grouping)) { - return rewrite.Swap(aggregator, route, "push down aggregation under route - remove original") + return Swap(aggregator, route, "push down aggregation under route - remove original") } // other rewrites require us to have reached this phase before we can 
consider them if !reachedPhase(ctx, delegateAggregation) { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } // if we have not yet been able to push this aggregation down, @@ -54,23 +52,19 @@ func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) switch src := aggregator.Source.(type) { case *Route: // if we have a single sharded route, we can push it down - output, applyResult, err = pushAggregationThroughRoute(ctx, aggregator, src) + output, applyResult = pushAggregationThroughRoute(ctx, aggregator, src) case *ApplyJoin: - output, applyResult, err = pushAggregationThroughJoin(ctx, aggregator, src) + output, applyResult = pushAggregationThroughJoin(ctx, aggregator, src) case *Filter: - output, applyResult, err = pushAggregationThroughFilter(ctx, aggregator, src) + output, applyResult = pushAggregationThroughFilter(ctx, aggregator, src) case *SubQueryContainer: - output, applyResult, err = pushAggregationThroughSubquery(ctx, aggregator, src) + output, applyResult = pushAggregationThroughSubquery(ctx, aggregator, src) default: - return aggregator, rewrite.SameTree, nil - } - - if err != nil { - return nil, nil, err + return aggregator, NoRewrite } if output == nil { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } aggregator.Pushed = true @@ -92,16 +86,13 @@ func pushAggregationThroughSubquery( ctx *plancontext.PlanningContext, rootAggr *Aggregator, src *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { - pushedAggr := rootAggr.Clone([]ops.Operator{src.Outer}).(*Aggregator) +) (Operator, *ApplyResult) { + pushedAggr := rootAggr.Clone([]Operator{src.Outer}).(*Aggregator) pushedAggr.Original = false pushedAggr.Pushed = false for _, subQuery := range src.Inner { - lhsCols, err := subQuery.OuterExpressionsNeeded(ctx, src.Outer) - if err != nil { - return nil, nil, err - } + lhsCols := subQuery.OuterExpressionsNeeded(ctx, src.Outer) for _, colName := range lhsCols { idx := 
slices.IndexFunc(pushedAggr.Columns, func(ae *sqlparser.AliasedExpr) bool { return ctx.SemTable.EqualsExpr(ae.Expr, colName) @@ -116,12 +107,12 @@ func pushAggregationThroughSubquery( src.Outer = pushedAggr if !rootAggr.Original { - return src, rewrite.NewTree("push Aggregation under subquery - keep original"), nil + return src, Rewrote("push Aggregation under subquery - keep original") } rootAggr.aggregateTheAggregates() - return rootAggr, rewrite.NewTree("push Aggregation under subquery"), nil + return rootAggr, Rewrote("push Aggregation under subquery") } func (a *Aggregator) aggregateTheAggregates() { @@ -145,15 +136,12 @@ func pushAggregationThroughRoute( ctx *plancontext.PlanningContext, aggregator *Aggregator, route *Route, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { // Create a new aggregator to be placed below the route. aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs()) aggrBelowRoute.Aggregations = nil - err := pushAggregations(ctx, aggregator, aggrBelowRoute) - if err != nil { - return nil, nil, err - } + pushAggregations(ctx, aggregator, aggrBelowRoute) // Set the source of the route to the new aggregator placed below the route. 
route.Source = aggrBelowRoute @@ -161,18 +149,15 @@ func pushAggregationThroughRoute( if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under route - remove original"), nil + return aggregator.Source, Rewrote("push aggregation under route - remove original") } - return aggregator, rewrite.NewTree("push aggregation under route - keep original"), nil + return aggregator, Rewrote("push aggregation under route - keep original") } // pushAggregations splits aggregations between the original aggregator and the one we are pushing down -func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) error { - canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return err - } +func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) { + canPushDistinctAggr, distinctExpr := checkIfWeCanPush(ctx, aggregator) distinctAggrGroupByAdded := false @@ -192,7 +177,7 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, // doing the aggregating on the vtgate level instead // Adding to group by can be done only once even though there are multiple distinct aggregation with same expression. 
if !distinctAggrGroupByAdded { - groupBy := NewGroupBy(distinctExpr, distinctExpr, aeDistinctExpr) + groupBy := NewGroupBy(distinctExpr, distinctExpr) groupBy.ColOffset = aggr.ColOffset aggrBelowRoute.Grouping = append(aggrBelowRoute.Grouping, groupBy) distinctAggrGroupByAdded = true @@ -202,11 +187,9 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, if !canPushDistinctAggr { aggregator.DistinctExpr = distinctExpr } - - return nil } -func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr, error) { +func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr) { canPush := true var distinctExpr sqlparser.Expr var differentExpr *sqlparser.AliasedExpr @@ -229,22 +212,22 @@ func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) } if !canPush && differentExpr != nil { - return false, nil, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr))) + panic(vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr)))) } - return canPush, distinctExpr, nil + return canPush, distinctExpr } func pushAggregationThroughFilter( ctx *plancontext.PlanningContext, aggregator *Aggregator, filter *Filter, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { columnsNeeded := collectColNamesNeeded(ctx, filter) // Create a new aggregator to be placed below the route. 
- pushedAggr := aggregator.Clone([]ops.Operator{filter.Source}).(*Aggregator) + pushedAggr := aggregator.Clone([]Operator{filter.Source}).(*Aggregator) pushedAggr.Pushed = false pushedAggr.Original = false @@ -264,10 +247,10 @@ withNextColumn: if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under filter - remove original"), nil + return aggregator.Source, Rewrote("push aggregation under filter - remove original") } aggregator.aggregateTheAggregates() - return aggregator, rewrite.NewTree("push aggregation under filter - keep original"), nil + return aggregator, Rewrote("push aggregation under filter - keep original") } func collectColNamesNeeded(ctx *plancontext.PlanningContext, f *Filter) (columnsNeeded []*sqlparser.ColName) { @@ -363,7 +346,7 @@ Transformed: / \ R1 R2 */ -func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (Operator, *ApplyResult) { lhs := &joinPusher{ orig: rootAggr, pushed: &Aggregator{ @@ -387,23 +370,17 @@ func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggr if err != nil { // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query if errors.Is(err, errAbortAggrPushing) { - return nil, nil, nil + return nil, nil } - return nil, nil, err + panic(err) } - groupingJCs, err := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) - if err != nil { - return nil, nil, err - } + groupingJCs := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) joinColumns = append(joinColumns, groupingJCs...) 
// We need to add any columns coming from the lhs of the join to the group by on that side // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS - err = addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) - if err != nil { - return nil, nil, err - } + addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) join.LHS, join.RHS = lhs.pushed, rhs.pushed join.JoinColumns = joinColumns @@ -411,23 +388,23 @@ func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggr if !rootAggr.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return output, rewrite.NewTree("push Aggregation under join - keep original"), nil + return output, Rewrote("push Aggregation under join - keep original") } rootAggr.aggregateTheAggregates() rootAggr.Source = output - return rootAggr, rewrite.NewTree("push Aggregation under join"), nil + return rootAggr, Rewrote("push Aggregation under join") } var errAbortAggrPushing = fmt.Errorf("abort aggregation pushing") -func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) error { +func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) { for _, pred := range join.JoinPredicates { for _, bve := range pred.LHSExprs { expr := bve.Expr wexpr, err := rootAggr.QP.GetSimplifiedExpr(ctx, expr) if err != nil { - return err + panic(err) } idx, found := canReuseColumn(ctx, lhs.pushed.Columns, expr, extractExpr) if !found { @@ -450,10 +427,9 @@ func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAgg }) } } - return nil } -func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) ([]JoinColumn, error) { +func splitGroupingToLeftAndRight(ctx 
*plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) []JoinColumn { var groupingJCs []JoinColumn for _, groupBy := range rootAggr.Grouping { @@ -463,30 +439,27 @@ func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Agg case deps.IsSolvedBy(lhs.tableID): lhs.addGrouping(ctx, groupBy) groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), + Original: groupBy.Inner, LHSExprs: []BindVarExpr{{Expr: expr}}, }) case deps.IsSolvedBy(rhs.tableID): rhs.addGrouping(ctx, groupBy) groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), + Original: groupBy.Inner, RHSExpr: expr, }) case deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): - jc, err := breakExpressionInLHSandRHSForApplyJoin(ctx, groupBy.SimplifiedExpr, lhs.tableID) - if err != nil { - return nil, err - } + jc := breakExpressionInLHSandRHSForApplyJoin(ctx, groupBy.SimplifiedExpr, lhs.tableID) for _, lhsExpr := range jc.LHSExprs { e := lhsExpr.Expr - lhs.addGrouping(ctx, NewGroupBy(e, e, aeWrap(e))) + lhs.addGrouping(ctx, NewGroupBy(e, e)) } - rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr, aeWrap(jc.RHSExpr))) + rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr)) default: - return nil, vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr)) + panic(vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr))) } } - return groupingJCs, nil + return groupingJCs } // splitAggrColumnsToLeftAndRight pushes all aggregations on the aggregator above a join and @@ -497,7 +470,7 @@ func splitAggrColumnsToLeftAndRight( aggregator *Aggregator, join *ApplyJoin, lhs, rhs *joinPusher, -) ([]JoinColumn, ops.Operator, error) { +) ([]JoinColumn, Operator, error) { proj := newAliasedProjection(join) proj.FromAggr = true builder := &aggBuilder{ @@ -507,10 +480,7 @@ func splitAggrColumnsToLeftAndRight( outerJoin: join.LeftJoin, } - canPushDistinctAggr, distinctExpr, 
err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return nil, nil, err - } + canPushDistinctAggr, distinctExpr := checkIfWeCanPush(ctx, aggregator) // Distinct aggregation cannot be pushed down in the join. // We keep node of the distinct aggregation expression to be used later for ordering. @@ -531,10 +501,7 @@ outer: continue outer } } - _, err := builder.proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, nil, err - } + builder.proj.addUnexploredExpr(col, col.Expr) } return builder.joinColumns, builder.proj, nil } @@ -566,7 +533,7 @@ func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser ae, created := ab.lhs.countStar(ctx) if created { ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, + Original: ae.Expr, LHSExprs: []BindVarExpr{{Expr: ae.Expr}}, }) } @@ -577,7 +544,7 @@ func (ab *aggBuilder) rightCountStar(ctx *plancontext.PlanningContext) *sqlparse ae, created := ab.rhs.countStar(ctx) if created { ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, + Original: ae.Expr, RHSExpr: ae.Expr, }) } @@ -599,7 +566,8 @@ func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.Ali func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { switch aggr.OpCode { case opcode.AggregateCountStar: - return ab.handleCountStar(ctx, aggr) + ab.handleCountStar(ctx, aggr) + return nil case opcode.AggregateCount, opcode.AggregateSum: return ab.handleAggrWithCountStarMultiplier(ctx, aggr) case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: @@ -632,7 +600,7 @@ func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) er func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { ab.lhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, + Original: aggr.Original.Expr, LHSExprs: []BindVarExpr{{Expr: aggr.Original.Expr}}, }) } @@ -640,16 +608,13 @@ func (ab *aggBuilder) 
pushThroughLeft(aggr Aggr) { func (ab *aggBuilder) pushThroughRight(aggr Aggr) { ab.rhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, + Original: aggr.Original.Expr, RHSExpr: aggr.Original.Expr, }) } func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { - _, err := ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) - if err != nil { - return err - } + ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) switch { @@ -663,12 +628,12 @@ func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningCont return nil } -func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) error { +func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) { // Add the aggregate to both sides of the join. lhsAE := ab.leftCountStar(ctx) rhsAE := ab.rightCountStar(ctx) - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) } func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { @@ -694,10 +659,11 @@ func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.Plannin return errAbortAggrPushing } - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + return nil } -func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) error { +func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) { // We expect the expressions to be different on each side of the join, otherwise it's an error. if lhsAE.Expr == rhsAE.Expr { panic(fmt.Sprintf("Need the two produced expressions to be different. 
%T %T", lhsAE, rhsAE)) @@ -726,8 +692,7 @@ func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), } - _, err := ab.proj.addUnexploredExpr(projAE, projExpr) - return err + ab.proj.addUnexploredExpr(projAE, projExpr) } func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { @@ -820,7 +785,7 @@ func needAvgBreaking(aggrs []Aggr) bool { // splitAvgAggregations takes an aggregator that has AVG aggregations in it and splits // these into sum/count expressions that can be spread out to shards -func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { +func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (Operator, *ApplyResult) { proj := newAliasedProjection(aggr) var columns []*sqlparser.AliasedExpr @@ -848,10 +813,7 @@ func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (o outputColumn := aeWrap(col.Expr) outputColumn.As = sqlparser.NewIdentifierCI(col.ColumnName()) - _, err := proj.addUnexploredExpr(sqlparser.CloneRefOfAliasedExpr(col), calcExpr) - if err != nil { - return nil, nil, err - } + proj.addUnexploredExpr(sqlparser.CloneRefOfAliasedExpr(col), calcExpr) col.Expr = sumExpr found := false for aggrOffset, aggregation := range aggr.Aggregations { @@ -877,5 +839,5 @@ func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (o aggr.Columns = append(aggr.Columns, columns...) aggr.Aggregations = append(aggr.Aggregations, aggregations...) 
- return proj, rewrite.NewTree("split avg aggregation"), nil + return proj, Rewrote("split avg aggregation") } diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index e1848752e75..6c07343498b 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,7 +34,7 @@ type ( // Both all aggregations and no grouping, and the inverse // of all grouping and no aggregations are valid configurations of this operator Aggregator struct { - Source ops.Operator + Source Operator Columns []*sqlparser.AliasedExpr Grouping []GroupBy @@ -60,7 +59,7 @@ type ( } ) -func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { +func (a *Aggregator) Clone(inputs []Operator) Operator { kopy := *a kopy.Source = inputs[0] kopy.Columns = slices.Clone(a.Columns) @@ -69,18 +68,18 @@ func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (a *Aggregator) Inputs() []ops.Operator { - return []ops.Operator{a.Source} +func (a *Aggregator) Inputs() []Operator { + return []Operator{a.Source} } -func (a *Aggregator) SetInputs(operators []ops.Operator) { +func (a *Aggregator) SetInputs(operators []Operator) { if len(operators) != 1 { panic(fmt.Sprintf("unexpected number of operators as input in aggregator: %d", len(operators))) } a.Source = operators[0] } -func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return &Filter{ Source: a, Predicates: []sqlparser.Expr{expr}, @@ -92,7 +91,7 @@ func (a 
*Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, e a.Columns = append(a.Columns, expr) if addToGroupBy { - groupBy := NewGroupBy(expr.Expr, expr.Expr, expr) + groupBy := NewGroupBy(expr.Expr, expr.Expr) groupBy.ColOffset = offset a.Grouping = append(a.Grouping, groupBy) } else { @@ -193,12 +192,6 @@ func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlpa if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { return offset } - colName, isColName := expr.(*sqlparser.ColName) - for i, col := range a.Columns { - if isColName && colName.Name.EqualString(col.As.String()) { - return i - } - } if addToGroupBy { panic(vterrors.VT13001(fmt.Sprintf("did not expect to add group by here: %s", sqlparser.String(expr)))) @@ -254,11 +247,11 @@ func (a *Aggregator) ShortDescription() string { return fmt.Sprintf("%s%s group by %s", org, strings.Join(columns, ", "), strings.Join(grouping, ",")) } -func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return a.Source.GetOrdering(ctx) } -func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) Operator { if a.offsetPlanned { return nil } @@ -408,7 +401,7 @@ func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliased // SplitAggregatorBelowRoute returns the aggregator that will live under the Route. 
// This is used when we are splitting the aggregation so one part is done // at the mysql level and one part at the vtgate level -func (a *Aggregator) SplitAggregatorBelowRoute(input []ops.Operator) *Aggregator { +func (a *Aggregator) SplitAggregatorBelowRoute(input []Operator) *Aggregator { newOp := a.Clone(input).(*Aggregator) newOp.Pushed = false newOp.Original = false @@ -420,4 +413,4 @@ func (a *Aggregator) introducesTableID() semantics.TableSet { return a.DT.introducesTableID() } -var _ ops.Operator = (*Aggregator)(nil) +var _ Operator = (*Aggregator)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go index 95d7d962738..7e2c100c944 100644 --- a/go/vt/vtgate/planbuilder/operators/apply_join.go +++ b/go/vt/vtgate/planbuilder/operators/apply_join.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -33,7 +32,7 @@ type ( // ApplyJoin is a nested loop join - for each row on the LHS, // we'll execute the plan on the RHS, feeding data from left to right ApplyJoin struct { - LHS, RHS ops.Operator + LHS, RHS Operator // LeftJoin will be true in the case of an outer join LeftJoin bool @@ -72,7 +71,7 @@ type ( // so they can be used for the result of this expression that is using data from both sides. 
// All fields will be used for these JoinColumn struct { - Original *sqlparser.AliasedExpr // this is the original expression being passed through + Original sqlparser.Expr // this is the original expression being passed through LHSExprs []BindVarExpr RHSExpr sqlparser.Expr GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true @@ -86,7 +85,7 @@ type ( } ) -func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { +func NewApplyJoin(lhs, rhs Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { return &ApplyJoin{ LHS: lhs, RHS: rhs, @@ -97,7 +96,7 @@ func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin } // Clone implements the Operator interface -func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { +func (aj *ApplyJoin) Clone(inputs []Operator) Operator { kopy := *aj kopy.LHS = inputs[0] kopy.RHS = inputs[1] @@ -110,33 +109,33 @@ func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, aj, expr, false, newFilter) } // Inputs implements the Operator interface -func (aj *ApplyJoin) Inputs() []ops.Operator { - return []ops.Operator{aj.LHS, aj.RHS} +func (aj *ApplyJoin) Inputs() []Operator { + return []Operator{aj.LHS, aj.RHS} } // SetInputs implements the Operator interface -func (aj *ApplyJoin) SetInputs(inputs []ops.Operator) { +func (aj *ApplyJoin) SetInputs(inputs []Operator) { aj.LHS, aj.RHS = inputs[0], inputs[1] } -func (aj *ApplyJoin) GetLHS() ops.Operator { +func (aj *ApplyJoin) GetLHS() Operator { return aj.LHS } -func (aj *ApplyJoin) GetRHS() ops.Operator { +func (aj *ApplyJoin) GetRHS() Operator { return aj.RHS } -func (aj *ApplyJoin) SetLHS(operator ops.Operator) 
{ +func (aj *ApplyJoin) SetLHS(operator Operator) { aj.LHS = operator } -func (aj *ApplyJoin) SetRHS(operator ops.Operator) { +func (aj *ApplyJoin) SetRHS(operator Operator) { aj.RHS = operator } @@ -151,10 +150,7 @@ func (aj *ApplyJoin) IsInner() bool { func (aj *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { aj.Predicate = ctx.SemTable.AndExpressions(expr, aj.Predicate) - col, err := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, TableID(aj.LHS)) - if err != nil { - panic(err) - } + col := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, TableID(aj.LHS)) aj.JoinPredicates = append(aj.JoinPredicates, col) rhs := aj.RHS.AddPredicate(ctx, col.RHSExpr) aj.RHS = rhs @@ -173,21 +169,21 @@ func (aj *ApplyJoin) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser. return transformColumnsToSelectExprs(ctx, aj) } -func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return aj.LHS.GetOrdering(ctx) } func joinColumnToAliasedExpr(c JoinColumn) *sqlparser.AliasedExpr { - return c.Original + return aeWrap(c.Original) } func joinColumnToExpr(column JoinColumn) sqlparser.Expr { - return column.Original.Expr + return column.Original } -func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn, err error) { +func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn) { defer func() { - col.Original = orig + col.Original = orig.Expr }() lhs := TableID(aj.LHS) rhs := TableID(aj.RHS) @@ -201,12 +197,9 @@ func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sq case deps.IsSolvedBy(rhs): col.RHSExpr = e case deps.IsSolvedBy(both): - col, err = breakExpressionInLHSandRHSForApplyJoin(ctx, e, TableID(aj.LHS)) - if err != 
nil { - return JoinColumn{}, err - } + col = breakExpressionInLHSandRHSForApplyJoin(ctx, e, TableID(aj.LHS)) default: - return JoinColumn{}, vterrors.VT13002(sqlparser.String(e)) + panic(vterrors.VT13002(sqlparser.String(e))) } return @@ -232,16 +225,13 @@ func (aj *ApplyJoin) AddColumn( return offset } } - col, err := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) - if err != nil { - panic(err) - } + col := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) offset := len(aj.JoinColumns) aj.JoinColumns = append(aj.JoinColumns, col) return offset } -func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { for _, col := range aj.JoinColumns { // Read the type description for JoinColumn to understand the following code for _, lhsExpr := range col.LHSExprs { diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go index f6acbadd35a..f8c8891f8f9 100644 --- a/go/vt/vtgate/planbuilder/operators/ast_to_op.go +++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -31,39 +30,28 @@ const foreignKeyConstraintValues = "fkc_vals" const foreignKeyUpdateExpr = "fkc_upd" // translateQueryToOp creates an operator tree that represents the input SELECT or UNION query -func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { +func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) Operator { switch node := selStmt.(type) { case *sqlparser.Select: - op, err = createOperatorFromSelect(ctx, node) + return createOperatorFromSelect(ctx, node) case 
*sqlparser.Union: - op, err = createOperatorFromUnion(ctx, node) + return createOperatorFromUnion(ctx, node) case *sqlparser.Update: - op, err = createOperatorFromUpdate(ctx, node) + return createOperatorFromUpdate(ctx, node) case *sqlparser.Delete: - op, err = createOperatorFromDelete(ctx, node) + return createOperatorFromDelete(ctx, node) case *sqlparser.Insert: - op, err = createOperatorFromInsert(ctx, node) + return createOperatorFromInsert(ctx, node) default: - err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) + panic(vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt))) } - if err != nil { - return nil, err - } - - return op, nil } -func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { - op, err := crossJoin(ctx, sel.From) - if err != nil { - return nil, err - } +func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) Operator { + op := crossJoin(ctx, sel.From) if sel.Where != nil { - op, err = addWherePredicates(ctx, sel.Where.Expr, op) - if err != nil { - return nil, err - } + op = addWherePredicates(ctx, sel.Where.Expr, op) } if sel.Comments != nil || sel.Lock != sqlparser.NoLock { @@ -76,26 +64,23 @@ func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.S op = newHorizon(op, sel) - return op, nil + return op } -func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { +func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) Operator { sqc := &SubQueryBuilder{} outerID := TableID(op) exprs := sqlparser.SplitAndExpression(nil, expr) for _, expr := range exprs { sqlparser.RemoveKeyspaceFromColName(expr) - subq, err := sqc.handleSubquery(ctx, expr, outerID) - if err != nil { - return nil, err - } + subq := sqc.handleSubquery(ctx, expr, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, expr) addColumnEquality(ctx, expr) } - 
return sqc.getRootOperator(op, nil), nil + return sqc.getRootOperator(op, nil) } // cloneASTAndSemState clones the AST and the semantic state of the input node. @@ -158,56 +143,44 @@ type joinPredicateCollector struct { func (jpc *joinPredicateCollector) inspectPredicate( ctx *plancontext.PlanningContext, predicate sqlparser.Expr, -) error { +) { pred := predicate deps := ctx.SemTable.RecursiveDeps(predicate) // if the subquery is not enough, but together we have all we need, // then we can use this predicate to connect the subquery to the outer query if !deps.IsSolvedBy(jpc.subqID) && deps.IsSolvedBy(jpc.totalID) { jpc.predicates = append(jpc.predicates, predicate) - jc, err := breakExpressionInLHSandRHSForApplyJoin(ctx, predicate, jpc.outerID) - if err != nil { - return err - } + jc := breakExpressionInLHSandRHSForApplyJoin(ctx, predicate, jpc.outerID) jpc.joinColumns = append(jpc.joinColumns, jc) pred = jc.RHSExpr } jpc.remainingPredicates = append(jpc.remainingPredicates, pred) - return nil } -func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { - opLHS, err := translateQueryToOp(ctx, node.Left) - if err != nil { - return nil, err - } - +func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) Operator { _, isRHSUnion := node.Right.(*sqlparser.Union) if isRHSUnion { - return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") + panic(vterrors.VT12001("nesting of UNIONs on the right-hand side")) } - opRHS, err := translateQueryToOp(ctx, node.Right) - if err != nil { - return nil, err - } - + opLHS := translateQueryToOp(ctx, node.Left) + opRHS := translateQueryToOp(ctx, node.Right) lexprs := ctx.SemTable.SelectExprs(node.Left) rexprs := ctx.SemTable.SelectExprs(node.Right) unionCols := ctx.SemTable.SelectExprs(node) - union := newUnion([]ops.Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) - return newHorizon(union, node), 
nil + union := newUnion([]Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) + return newHorizon(union, node) } // createOpFromStmt creates an operator from the given statement. It takes in two additional arguments— // 1. verifyAllFKs: For this given statement, do we need to verify validity of all the foreign keys on the vtgate level. // 2. fkToIgnore: The foreign key constraint to specifically ignore while planning the statement. This field is used in UPDATE CASCADE planning, wherein while planning the child update // query, we need to ignore the parent foreign key constraint that caused the cascade in question. -func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) (ops.Operator, error) { +func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) Operator { var err error ctx, err = plancontext.CreatePlanningContext(stmt, ctx.ReservedVars, ctx.VSchema, ctx.PlannerVersion) if err != nil { - return nil, err + panic(err) } // TODO (@GuptaManan100, @harshit-gangal): When we add cross-shard foreign keys support, @@ -222,7 +195,7 @@ func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement // From all the parent foreign keys involved, we should remove the one that we need to ignore. 
err = ctx.SemTable.RemoveParentForeignKey(fkToIgnore) if err != nil { - return nil, err + panic(err) } // Now, we can filter the foreign keys further based on the planning context, specifically whether we are running @@ -236,13 +209,17 @@ func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement err = ctx.SemTable.RemoveNonRequiredForeignKeys(ctx.VerifyAllFKs, vindexes.DeleteAction) } if err != nil { - return nil, err + panic(err) } - return PlanQuery(ctx, stmt) + op, err := PlanQuery(ctx, stmt) + if err != nil { + panic(err) + } + return op } -func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) Operator { switch tableExpr := tableExpr.(type) { case *sqlparser.AliasedTableExpr: return getOperatorFromAliasedTableExpr(ctx, tableExpr, onlyTable) @@ -251,19 +228,13 @@ func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlpar case *sqlparser.ParenTableExpr: return crossJoin(ctx, tableExpr.Exprs) default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr)) + panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr))) } } -func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { - lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) - if err != nil { - return nil, err - } - rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) - if err != nil { - return nil, err - } +func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) Operator { + lhs := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) + rhs := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) switch tableExpr.Join { case sqlparser.NormalJoinType: @@ -271,17 
+242,17 @@ func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *s case sqlparser.LeftJoinType, sqlparser.RightJoinType: return createOuterJoin(tableExpr, lhs, rhs) default: - return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()) + panic(vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString())) } } -func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) Operator { tableID := ctx.SemTable.TableSetFor(tableExpr) switch tbl := tableExpr.Expr.(type) { case sqlparser.TableName: tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, err + panic(err) } if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { @@ -295,73 +266,71 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr }, Vindex: vt.Vindex, Solved: solves, - }, nil + } } qg := newQueryGraph() isInfSchema := tableInfo.IsInfSchema() qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema} qg.Tables = append(qg.Tables, qt) - return qg, nil + return qg case *sqlparser.DerivedTable: if onlyTable && tbl.Select.GetLimit() == nil { tbl.Select.SetOrderBy(nil) } - inner, err := translateQueryToOp(ctx, tbl.Select) - if err != nil { - return nil, err - } + inner := translateQueryToOp(ctx, tbl.Select) if horizon, ok := inner.(*Horizon); ok { horizon.TableId = &tableID horizon.Alias = tableExpr.As.String() horizon.ColumnAliases = tableExpr.Columns qp, err := CreateQPFromSelectStatement(ctx, tbl.Select) if err != nil { - return nil, err + panic(err) } horizon.QP = qp } - return inner, nil + return inner default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) + panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl))) } } -func 
crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { - var output ops.Operator +func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) Operator { + var output Operator for _, tableExpr := range exprs { - op, err := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) - if err != nil { - return nil, err - } + op := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) if output == nil { output = op } else { output = createJoin(ctx, output, op) } } - return output, nil + return output } -func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) { +func createQueryTableForDML( + ctx *plancontext.PlanningContext, + tableExpr sqlparser.TableExpr, + whereClause *sqlparser.Where, +) (semantics.TableInfo, *QueryTable) { alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr) if !ok { - return nil, nil, vterrors.VT13001("expected AliasedTableExpr") + panic(vterrors.VT13001("expected AliasedTableExpr")) } tblName, ok := alTbl.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT13001("expected TableName") + panic(vterrors.VT13001("expected TableName")) } tableID := ctx.SemTable.TableSetFor(alTbl) tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, nil, err + panic(err) } if tableInfo.IsInfSchema() { - return nil, nil, vterrors.VT12001("update information schema tables") + panic(vterrors.VT12001("update information schema tables")) } var predicates []sqlparser.Expr @@ -374,7 +343,7 @@ func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparse Table: tblName, Predicates: predicates, } - return tableInfo, qt, nil + return tableInfo, qt } func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { @@ -404,7 +373,7 @@ func createSelectionOp( orderBy sqlparser.OrderBy, limit *sqlparser.Limit, lock sqlparser.Lock, -) 
(ops.Operator, error) { +) Operator { selectionStmt := &sqlparser.Select{ SelectExprs: selectExprs, From: tableExprs, diff --git a/go/vt/vtgate/planbuilder/operators/comments.go b/go/vt/vtgate/planbuilder/operators/comments.go index 9ede4b9e0da..912fa4138d9 100644 --- a/go/vt/vtgate/planbuilder/operators/comments.go +++ b/go/vt/vtgate/planbuilder/operators/comments.go @@ -21,32 +21,31 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // LockAndComment contains any comments or locking directives we want on all queries down from this operator type LockAndComment struct { - Source ops.Operator + Source Operator Comments *sqlparser.ParsedComments Lock sqlparser.Lock } -func (l *LockAndComment) Clone(inputs []ops.Operator) ops.Operator { +func (l *LockAndComment) Clone(inputs []Operator) Operator { klon := *l klon.Source = inputs[0] return &klon } -func (l *LockAndComment) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *LockAndComment) Inputs() []Operator { + return []Operator{l.Source} } -func (l *LockAndComment) SetInputs(operators []ops.Operator) { +func (l *LockAndComment) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -76,6 +75,6 @@ func (l *LockAndComment) ShortDescription() string { return strings.Join(s, " ") } -func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index 
8b7841bdcdd..17f6125992f 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -45,7 +44,7 @@ func (d *Delete) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (d *Delete) Clone([]ops.Operator) ops.Operator { +func (d *Delete) Clone([]Operator) Operator { return &Delete{ QTable: d.QTable, VTable: d.VTable, @@ -61,7 +60,7 @@ func (d *Delete) TablesUsed() []string { return nil } -func (d *Delete) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (d *Delete) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -73,24 +72,13 @@ func (d *Delete) Statement() sqlparser.Statement { return d.AST } -func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - if err != nil { - return nil, err - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") - if err != nil { - return nil, err - } +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) Operator { + tableInfo, qt := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) + vindexTable, routing := buildVindexTableForDML(ctx, tableInfo, qt, "delete") delClone := sqlparser.CloneRefOfDelete(deleteStmt) // Create the delete operator first. 
- delOp, err := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) - if err != nil { - return nil, err - } - + delOp := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) if deleteStmt.Comments != nil { delOp = &LockAndComment{ Source: delOp, @@ -101,11 +89,11 @@ func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlp childFks := ctx.SemTable.GetChildForeignKeysList() // If there are no foreign key constraints, then we don't need to do anything. if len(childFks) == 0 { - return delOp, nil + return delOp } // If the delete statement has a limit, we don't support it yet. if deleteStmt.Limit != nil { - return nil, vterrors.VT12001("foreign keys management at vitess with limit") + panic(vterrors.VT12001("foreign keys management at vitess with limit")) } return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) @@ -116,7 +104,7 @@ func createDeleteOperator( deleteStmt *sqlparser.Delete, qt *QueryTable, vindexTable *vindexes.Table, - routing Routing) (ops.Operator, error) { + routing Routing) Operator { del := &Delete{ QTable: qt, VTable: vindexTable, @@ -128,13 +116,10 @@ func createDeleteOperator( } if !vindexTable.Keyspace.Sharded { - return route, nil + return route } - primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, vindexTable) - if err != nil { - return nil, err - } + primaryVindex, vindexAndPredicates := getVindexInformation(qt.ID, vindexTable) tr, ok := routing.(*ShardedRouting) if ok { @@ -151,58 +136,49 @@ func createDeleteOperator( sqc := &SubQueryBuilder{} for _, predicate := range qt.Predicates { - if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { - return nil, err - } else if subq != nil { + subq := sqc.handleSubquery(ctx, predicate, qt.ID) + if subq != nil { continue } - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + + routing = UpdateRoutingLogic(ctx, predicate, routing) } if routing.OpCode() == 
engine.Scatter && deleteStmt.Limit != nil { // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard DELETE with LIMIT") + panic(vterrors.VT12001("multi shard DELETE with LIMIT")) } - return sqc.getRootOperator(route, nil), nil + return sqc.getRootOperator(route, nil) } -func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp ops.Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) (ops.Operator, error) { +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) Operator { var fkChildren []*FkChild var selectExprs []sqlparser.SelectExpr for _, fk := range childFks { // Any RESTRICT type foreign keys that arrive here, // are cross-shard/cross-keyspace RESTRICT cases, which we don't currently support. if fk.OnDelete.IsRestrict() { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. 
var offsets []int offsets, selectExprs = addColumns(ctx, fk.ParentColumns, selectExprs) - fkChild, err := createFkChildForDelete(ctx, fk, offsets) - if err != nil { - return nil, err - } - fkChildren = append(fkChildren, fkChild) - } - selectionOp, err := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, sqlparser.ForUpdateLockNoWait) - if err != nil { - return nil, err + fkChildren = append(fkChildren, + createFkChildForDelete(ctx, fk, offsets)) } + selectionOp := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, sqlparser.ForUpdateLockNoWait) return &FkCascade{ Selection: selectionOp, Children: fkChildren, Parent: parentOp, - }, nil + } } -func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) (*FkChild, error) { +func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) *FkChild { bvName := ctx.ReservedVars.ReserveVariable(foreignKeyConstraintValues) parsedComments := getParsedCommentsForFkChecks(ctx) var childStmt sqlparser.Statement @@ -240,18 +216,15 @@ func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildF Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, } case sqlparser.SetDefault: - return nil, vterrors.VT09016() + panic(vterrors.VT09016()) } // For the child statement of a DELETE query, we don't need to verify all the FKs on VTgate or ignore any foreign key explicitly. 
- childOp, err := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) - if err != nil { - return nil, err - } + childOp := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) return &FkChild{ BVName: bvName, Cols: cols, Op: childOp, - }, nil + } } diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go index d7aad08d206..74f4495374c 100644 --- a/go/vt/vtgate/planbuilder/operators/distinct.go +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -21,13 +21,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( Distinct struct { - Source ops.Operator + Source Operator QP *QueryProjection // When we go from AST to operator, we place DISTINCT ops in the required places in the op tree @@ -46,7 +45,7 @@ type ( } ) -func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) Operator { columns := d.GetColumns(ctx) for idx, col := range columns { e, err := d.QP.GetSimplifiedExpr(ctx, col.Expr) @@ -71,7 +70,7 @@ func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { return nil } -func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { +func (d *Distinct) Clone(inputs []Operator) Operator { return &Distinct{ Required: d.Required, Source: inputs[0], @@ -82,15 +81,15 @@ func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { } } -func (d *Distinct) Inputs() []ops.Operator { - return []ops.Operator{d.Source} +func (d *Distinct) Inputs() []Operator { + return []Operator{d.Source} } -func (d *Distinct) SetInputs(operators []ops.Operator) { +func (d *Distinct) SetInputs(operators []Operator) { d.Source = operators[0] } -func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr 
sqlparser.Expr) ops.Operator { +func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { d.Source = d.Source.AddPredicate(ctx, expr) return d } @@ -118,7 +117,7 @@ func (d *Distinct) ShortDescription() string { return "Performance" } -func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return d.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index 8f87a71c95f..3140142858c 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -19,12 +19,11 @@ package operators import ( "fmt" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -33,12 +32,11 @@ import ( // If it cannot find a unique vindex match, it returns an error. 
func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( *vindexes.ColumnVindex, - []*VindexPlusPredicates, - error) { + []*VindexPlusPredicates) { // Check that we have a primary vindex which is valid if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { - return nil, nil, vterrors.VT09001(table.Name) + panic(vterrors.VT09001(table.Name)) } primaryVindex := table.ColumnVindexes[0] @@ -55,10 +53,16 @@ func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( TableID: id, }) } - return primaryVindex, vindexesAndPredicates, nil + return primaryVindex, vindexesAndPredicates } -func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI, assignments []SetExpr) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string, err error) { +func buildChangedVindexesValues( + ctx *plancontext.PlanningContext, + update *sqlparser.Update, + table *vindexes.Table, + ksidCols []sqlparser.IdentifierCI, + assignments []SetExpr, +) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string) { changedVindexes := make(map[string]*engine.VindexValues) buf, offset := initialQuery(ksidCols, table) for i, vindex := range table.ColumnVindexes { @@ -72,7 +76,7 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar continue } if found { - return nil, "", nil, vterrors.VT03015(assignment.Name.Name) + panic(vterrors.VT03015(assignment.Name.Name)) } found = true pv, err := evalengine.Translate(assignment.Expr.EvalExpr, &evalengine.Config{ @@ -80,7 +84,7 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, "", nil, invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr) + panic(invalidUpdateExpr(assignment.Name.Name.String(), 
assignment.Expr.EvalExpr)) } if assignment.Expr.Info != nil { @@ -107,13 +111,13 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar } if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name))) } if i == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name))) } if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name))) } changedVindexes[vindex.Name] = &engine.VindexValues{ EvalExprMap: vindexValueMap, @@ -122,16 +126,16 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar offset++ } if len(changedVindexes) == 0 { - return nil, "", nil, nil + return nil, "", nil } // generate rest of the owned vindex query. 
aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) if !ok { - return nil, "", nil, vterrors.VT12001("UPDATE on complex table expression") + panic(vterrors.VT12001("UPDATE on complex table expression")) } tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), subQueriesArgOnChangedVindex, nil + return changedVindexes, buf.String(), subQueriesArgOnChangedVindex } func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go index 0df875a6fbd..65600155631 100644 --- a/go/vt/vtgate/planbuilder/operators/expressions.go +++ b/go/vt/vtgate/planbuilder/operators/expressions.go @@ -28,7 +28,7 @@ func breakExpressionInLHSandRHSForApplyJoin( ctx *plancontext.PlanningContext, expr sqlparser.Expr, lhs semantics.TableSet, -) (col JoinColumn, err error) { +) (col JoinColumn) { rewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { nodeExpr, ok := cursor.Node().(sqlparser.Expr) if !ok || !fetchByOffset(nodeExpr) { @@ -51,9 +51,6 @@ func breakExpressionInLHSandRHSForApplyJoin( cursor.Replace(arg) }, nil).(sqlparser.Expr) - if err != nil { - return JoinColumn{}, err - } ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr) col.RHSExpr = rewrittenExpr return diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index cee57c74943..f2171c43a1b 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -24,14 +24,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type Filter struct { - Source ops.Operator + Source Operator Predicates []sqlparser.Expr // PredicateWithOffsets is the evalengine expression that will finally be used. @@ -41,14 +39,14 @@ type Filter struct { Truncate int } -func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { +func newFilter(op Operator, expr sqlparser.Expr) Operator { return &Filter{ Source: op, Predicates: []sqlparser.Expr{expr}, } } // Clone implements the Operator interface -func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { +func (f *Filter) Clone(inputs []Operator) Operator { return &Filter{ Source: inputs[0], Predicates: slices.Clone(f.Predicates), @@ -58,12 +56,12 @@ func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { } // Inputs implements the Operator interface -func (f *Filter) Inputs() []ops.Operator { - return []ops.Operator{f.Source} +func (f *Filter) Inputs() []Operator { + return []Operator{f.Source} } // SetInputs implements the Operator interface -func (f *Filter) SetInputs(ops []ops.Operator) { +func (f *Filter) SetInputs(ops []Operator) { f.Source = ops[0] } @@ -80,7 +78,7 @@ func (f *Filter) UnsolvedPredicates(st *semantics.SemTable) []sqlparser.Expr { return result } -func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { f.Source = f.Source.AddPredicate(ctx, expr) return f } @@ -101,25 +99,25 @@ func (f *Filter) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return f.Source.GetSelectExprs(ctx) } -func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return 
f.Source.GetOrdering(ctx) } -func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (f *Filter) Compact(*plancontext.PlanningContext) (Operator, *ApplyResult) { if len(f.Predicates) == 0 { - return f.Source, rewrite.NewTree("filter with no predicates removed"), nil + return f.Source, Rewrote("filter with no predicates removed") } other, isFilter := f.Source.(*Filter) if !isFilter { - return f, rewrite.SameTree, nil + return f, NoRewrite } f.Source = other.Source f.Predicates = append(f.Predicates, other.Predicates...) - return f, rewrite.NewTree("two filters merged into one"), nil + return f, Rewrote("two filters merged into one") } -func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) Operator { cfg := &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, diff --git a/go/vt/vtgate/planbuilder/operators/fk_cascade.go b/go/vt/vtgate/planbuilder/operators/fk_cascade.go index 73b902a4980..f24b59ca5ab 100644 --- a/go/vt/vtgate/planbuilder/operators/fk_cascade.go +++ b/go/vt/vtgate/planbuilder/operators/fk_cascade.go @@ -20,7 +20,6 @@ import ( "slices" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -29,7 +28,7 @@ type FkChild struct { BVName string Cols []int // indexes NonLiteralInfo []engine.NonLiteralUpdateInfo - Op ops.Operator + Op Operator noColumns noPredicates @@ -39,19 +38,19 @@ type FkChild struct { // as an operator. This operator is created for DML queries that require // cascades (for example, ON DELETE CASCADE). 
type FkCascade struct { - Selection ops.Operator + Selection Operator Children []*FkChild - Parent ops.Operator + Parent Operator noColumns noPredicates } -var _ ops.Operator = (*FkCascade)(nil) +var _ Operator = (*FkCascade)(nil) // Inputs implements the Operator interface -func (fkc *FkCascade) Inputs() []ops.Operator { - var inputs []ops.Operator +func (fkc *FkCascade) Inputs() []Operator { + var inputs []Operator inputs = append(inputs, fkc.Parent) inputs = append(inputs, fkc.Selection) for _, child := range fkc.Children { @@ -61,7 +60,7 @@ func (fkc *FkCascade) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkc *FkCascade) SetInputs(operators []ops.Operator) { +func (fkc *FkCascade) SetInputs(operators []Operator) { if len(operators) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -76,7 +75,7 @@ func (fkc *FkCascade) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { +func (fkc *FkCascade) Clone(inputs []Operator) Operator { if len(inputs) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -100,7 +99,7 @@ func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { } // GetOrdering implements the Operator interface -func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/fk_verify.go b/go/vt/vtgate/planbuilder/operators/fk_verify.go index 39e1092c8d9..8275a8d462f 100644 --- a/go/vt/vtgate/planbuilder/operators/fk_verify.go +++ b/go/vt/vtgate/planbuilder/operators/fk_verify.go @@ -17,14 +17,13 @@ limitations under the License. 
package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // VerifyOp keeps the information about the foreign key verification operation. // It is a Parent verification or a Child verification. type VerifyOp struct { - Op ops.Operator + Op Operator Typ string } @@ -33,17 +32,17 @@ type VerifyOp struct { // verifications on the existence of the rows in the parent table (for example, INSERT and UPDATE). type FkVerify struct { Verify []*VerifyOp - Input ops.Operator + Input Operator noColumns noPredicates } -var _ ops.Operator = (*FkVerify)(nil) +var _ Operator = (*FkVerify)(nil) // Inputs implements the Operator interface -func (fkv *FkVerify) Inputs() []ops.Operator { - inputs := []ops.Operator{fkv.Input} +func (fkv *FkVerify) Inputs() []Operator { + inputs := []Operator{fkv.Input} for _, v := range fkv.Verify { inputs = append(inputs, v.Op) } @@ -51,7 +50,7 @@ func (fkv *FkVerify) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkv *FkVerify) SetInputs(operators []ops.Operator) { +func (fkv *FkVerify) SetInputs(operators []Operator) { fkv.Input = operators[0] if len(fkv.Verify) != len(operators)-1 { panic("mismatched number of verify inputs") @@ -62,7 +61,7 @@ func (fkv *FkVerify) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { +func (fkv *FkVerify) Clone(inputs []Operator) Operator { newFkv := &FkVerify{ Verify: fkv.Verify, } @@ -71,7 +70,7 @@ func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { } // GetOrdering implements the Operator interface -func (fkv *FkVerify) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkv *FkVerify) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go index 
e9cfeb7d107..ce23e510c09 100644 --- a/go/vt/vtgate/planbuilder/operators/hash_join.go +++ b/go/vt/vtgate/planbuilder/operators/hash_join.go @@ -25,14 +25,13 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( HashJoin struct { - LHS, RHS ops.Operator + LHS, RHS Operator // LeftJoin will be true in the case of an outer join LeftJoin bool @@ -62,10 +61,10 @@ type ( } ) -var _ ops.Operator = (*HashJoin)(nil) +var _ Operator = (*HashJoin)(nil) var _ JoinOp = (*HashJoin)(nil) -func NewHashJoin(lhs, rhs ops.Operator, outerJoin bool) *HashJoin { +func NewHashJoin(lhs, rhs Operator, outerJoin bool) *HashJoin { hj := &HashJoin{ LHS: lhs, RHS: rhs, @@ -74,7 +73,7 @@ func NewHashJoin(lhs, rhs ops.Operator, outerJoin bool) *HashJoin { return hj } -func (hj *HashJoin) Clone(inputs []ops.Operator) ops.Operator { +func (hj *HashJoin) Clone(inputs []Operator) Operator { kopy := *hj kopy.LHS, kopy.RHS = inputs[0], inputs[1] kopy.columns = slices.Clone(hj.columns) @@ -83,15 +82,15 @@ func (hj *HashJoin) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (hj *HashJoin) Inputs() []ops.Operator { - return []ops.Operator{hj.LHS, hj.RHS} +func (hj *HashJoin) Inputs() []Operator { + return []Operator{hj.LHS, hj.RHS} } -func (hj *HashJoin) SetInputs(operators []ops.Operator) { +func (hj *HashJoin) SetInputs(operators []Operator) { hj.LHS, hj.RHS = operators[0], operators[1] } -func (hj *HashJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (hj *HashJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, hj, expr, false, newFilter) } @@ -107,7 +106,7 @@ func (hj *HashJoin) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bo return 
len(hj.columns) - 1 } -func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { if hj.offset { return nil } @@ -124,15 +123,11 @@ func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { }) proj := newAliasedProjection(hj) - _, err := proj.addProjExpr(eexprs...) - if err != nil { - panic(err) - } - + proj.addProjExpr(eexprs...) return proj } -func (hj *HashJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { +func (hj *HashJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { for offset, col := range hj.columns { if ctx.SemTable.EqualsExprWithDeps(expr, col) { return offset @@ -162,23 +157,23 @@ func (hj *HashJoin) ShortDescription() string { return cmp } -func (hj *HashJoin) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (hj *HashJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return nil // hash joins will never promise an output order } -func (hj *HashJoin) GetLHS() ops.Operator { +func (hj *HashJoin) GetLHS() Operator { return hj.LHS } -func (hj *HashJoin) GetRHS() ops.Operator { +func (hj *HashJoin) GetRHS() Operator { return hj.RHS } -func (hj *HashJoin) SetLHS(op ops.Operator) { +func (hj *HashJoin) SetLHS(op Operator) { hj.LHS = op } -func (hj *HashJoin) SetRHS(op ops.Operator) { +func (hj *HashJoin) SetRHS(op Operator) { hj.RHS = op } @@ -239,7 +234,7 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp return true } deps := ctx.SemTable.RecursiveDeps(expr) - check := func(id semantics.TableSet, op ops.Operator, offsetter func(int) int) int { + check := func(id semantics.TableSet, op Operator, offsetter func(int) int) int { if !deps.IsSolvedBy(id) { return -1 } diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go index 21be634d7d8..e5801f6b36f 
100644 --- a/go/vt/vtgate/planbuilder/operators/helpers.go +++ b/go/vt/vtgate/planbuilder/operators/helpers.go @@ -21,36 +21,34 @@ import ( "sort" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) // compact will optimise the operator tree into a smaller but equivalent version -func compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func compact(ctx *plancontext.PlanningContext, op Operator) Operator { type compactable interface { // Compact implement this interface for operators that have easy to see optimisations - Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) + Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) } - newOp, err := rewrite.BottomUp(op, TableID, func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + newOp := BottomUp(op, TableID, func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { newOp, ok := op.(compactable) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } return newOp.Compact(ctx) }, stopAtRoute) - return newOp, err + return newOp } -func checkValid(op ops.Operator) error { +func checkValid(op Operator) error { type checkable interface { CheckValid() error } - return rewrite.Visit(op, func(this ops.Operator) error { + return Visit(op, func(this Operator) error { if chk, ok := this.(checkable); ok { return chk.CheckValid() } @@ -58,9 +56,9 @@ func checkValid(op ops.Operator) error { }) } -func Clone(op ops.Operator) ops.Operator { +func Clone(op Operator) Operator { inputs := op.Inputs() - clones := make([]ops.Operator, len(inputs)) + clones := make([]Operator, len(inputs)) for i, input := range inputs { clones[i] = Clone(input) 
} @@ -72,8 +70,8 @@ type tableIDIntroducer interface { introducesTableID() semantics.TableSet } -func TableID(op ops.Operator) (result semantics.TableSet) { - _ = rewrite.Visit(op, func(this ops.Operator) error { +func TableID(op Operator) (result semantics.TableSet) { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(tableIDIntroducer); ok { result = result.Merge(tbl.introducesTableID()) } @@ -87,9 +85,9 @@ type TableUser interface { TablesUsed() []string } -func TablesUsed(op ops.Operator) []string { +func TablesUsed(op Operator) []string { addString, collect := collectSortedUniqueStrings() - _ = rewrite.Visit(op, func(this ops.Operator) error { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(TableUser); ok { for _, u := range tbl.TablesUsed() { addString(u) @@ -100,29 +98,7 @@ func TablesUsed(op ops.Operator) []string { return collect() } -func UnresolvedPredicates(op ops.Operator, st *semantics.SemTable) (result []sqlparser.Expr) { - type unresolved interface { - // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and - // on the outside of it (a parent Select expression, any other table not used by Operator, etc.). - // This is used for sub-queries. An example query could be: - // SELECT * FROM tbl WHERE EXISTS (SELECT 1 FROM otherTbl WHERE tbl.col = otherTbl.col) - // The subquery would have one unsolved predicate: `tbl.col = otherTbl.col` - // It's a predicate that belongs to the inner query, but it needs data from the outer query - // These predicates dictate which data we have to send from the outer side to the inner - UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr - } - - _ = rewrite.Visit(op, func(this ops.Operator) error { - if tbl, ok := this.(unresolved); ok { - result = append(result, tbl.UnsolvedPredicates(st)...) 
- } - - return nil - }) - return -} - -func CostOf(op ops.Operator) (cost int) { +func CostOf(op Operator) (cost int) { type costly interface { // Cost returns the cost for this operator. All the costly operators in the tree are summed together to get the // total cost of the operator tree. @@ -131,7 +107,7 @@ func CostOf(op ops.Operator) (cost int) { Cost() int } - _ = rewrite.Visit(op, func(op ops.Operator) error { + _ = Visit(op, func(op Operator) error { if costlyOp, ok := op.(costly); ok { cost += costlyOp.Cost() } diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go index c58db4f3964..1a6fc6331ea 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon.go +++ b/go/vt/vtgate/planbuilder/operators/horizon.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,7 +34,7 @@ import ( // Project/Aggregate/Sort/Limit operations, some which can be pushed down, // and some that have to be evaluated at the vtgate level. 
type Horizon struct { - Source ops.Operator + Source Operator // If this is a derived table, the two following fields will contain the tableID and name of it TableId *semantics.TableSet @@ -52,12 +51,12 @@ type Horizon struct { ColumnsOffset []int } -func newHorizon(src ops.Operator, query sqlparser.SelectStatement) *Horizon { +func newHorizon(src Operator, query sqlparser.SelectStatement) *Horizon { return &Horizon{Source: src, Query: query} } // Clone implements the Operator interface -func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { +func (h *Horizon) Clone(inputs []Operator) Operator { klone := *h klone.Source = inputs[0] klone.ColumnAliases = sqlparser.CloneColumns(h.ColumnAliases) @@ -77,16 +76,16 @@ func (h *Horizon) IsMergeable(ctx *plancontext.PlanningContext) bool { } // Inputs implements the Operator interface -func (h *Horizon) Inputs() []ops.Operator { - return []ops.Operator{h.Source} +func (h *Horizon) Inputs() []Operator { + return []Operator{h.Source} } // SetInputs implements the Operator interface -func (h *Horizon) SetInputs(ops []ops.Operator) { +func (h *Horizon) SetInputs(ops []Operator) { h.Source = ops[0] } -func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { if _, isUNion := h.Source.(*Union); isUNion { // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting h.Source = h.Source.AddPredicate(ctx, expr) @@ -181,12 +180,9 @@ func (h *Horizon) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectE return sqlparser.GetFirstSelect(h.Query).SelectExprs } -func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { if h.QP == nil { - _, err := h.getQP(ctx) - if err != nil { - panic(err) - } + h.getQP(ctx) } return h.QP.OrderExprs } @@ -196,20 
+192,20 @@ func (h *Horizon) selectStatement() sqlparser.SelectStatement { return h.Query } -func (h *Horizon) src() ops.Operator { +func (h *Horizon) src() Operator { return h.Source } -func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { +func (h *Horizon) getQP(ctx *plancontext.PlanningContext) *QueryProjection { if h.QP != nil { - return h.QP, nil + return h.QP } qp, err := CreateQPFromSelectStatement(ctx, h.Query) if err != nil { - return nil, err + panic(err) } h.QP = qp - return h.QP, nil + return h.QP } func (h *Horizon) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go index 06bcf2aaeb5..e3ddc5d9232 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go +++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go @@ -23,12 +23,10 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (Operator, *ApplyResult) { statement := horizon.selectStatement() switch sel := statement.(type) { case *sqlparser.Select: @@ -36,16 +34,13 @@ func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Oper case *sqlparser.Union: return expandUnionHorizon(ctx, horizon, sel) } - return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement)) + panic(vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement))) } -func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (ops.Operator, *rewrite.ApplyResult, error) { 
+func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (Operator, *ApplyResult) { op := horizon.Source - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := horizon.getQP(ctx) if len(qp.OrderExprs) > 0 { op = &Ordering{ @@ -72,20 +67,15 @@ func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, unio } if op == horizon.Source { - return op, rewrite.NewTree("removed UNION horizon not used"), nil + return op, Rewrote("removed UNION horizon not used") } - return op, rewrite.NewTree("expand UNION horizon into smaller components"), nil + return op, Rewrote("expand UNION horizon into smaller components") } -func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (ops.Operator, *rewrite.ApplyResult, error) { +func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (Operator, *ApplyResult) { op := createProjectionFromSelect(ctx, horizon) - - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } - + qp := horizon.getQP(ctx) var extracted []string if qp.HasAggr { extracted = append(extracted, "Aggregation") @@ -103,10 +93,7 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel } if sel.Having != nil { - op, err = addWherePredicates(ctx, sel.Having.Expr, op) - if err != nil { - return nil, nil, err - } + op = addWherePredicates(ctx, sel.Having.Expr, op) extracted = append(extracted, "Filter") } @@ -126,14 +113,11 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel extracted = append(extracted, "Limit") } - return op, rewrite.NewTree(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", "))), nil + return op, Rewrote(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", "))) } -func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out 
ops.Operator) { - qp, err := horizon.getQP(ctx) - if err != nil { - panic(err) - } +func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out Operator) { + qp := horizon.getQP(ctx) var dt *DerivedTable if horizon.TableId != nil { @@ -172,7 +156,7 @@ func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horiz return createProjectionForSimpleAggregation(ctx, a, qp) } -func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) Operator { outer: for colIdx, expr := range qp.SelectExprs { ae, err := expr.GetAliasedExpr() @@ -206,7 +190,7 @@ outer: return a } -func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) Operator { p := newAliasedProjection(a) p.DT = a.DT for _, expr := range qp.SelectExprs { @@ -215,10 +199,7 @@ func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) o panic(err) } - _, err = p.addProjExpr(newProjExpr(ae)) - if err != nil { - panic(err) - } + p.addProjExpr(newProjExpr(ae)) } for i, by := range a.Grouping { a.Grouping[i].ColOffset = len(a.Columns) @@ -231,7 +212,7 @@ func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) o return p } -func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) *Projection { +func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection { // first we need to check if we have all columns or there are still unexpanded stars aes, err := slice.MapWithError(qp.SelectExprs, func(from SelectExpr) (*sqlparser.AliasedExpr, error) { ae, ok := from.Col.(*sqlparser.AliasedExpr) @@ -252,28 +233,19 @@ func createProjectionWithoutAggr(ctx 
*plancontext.PlanningContext, qp *QueryProj for _, ae := range aes { org := sqlparser.CloneRefOfAliasedExpr(ae) expr := ae.Expr - newExpr, subqs, err := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) - if err != nil { - panic(err) - } + newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) if newExpr == nil { // there was no subquery in this expression - _, err := proj.addUnexploredExpr(org, expr) - if err != nil { - panic(err) - } + proj.addUnexploredExpr(org, expr) } else { - err := proj.addSubqueryExpr(org, newExpr, subqs...) - if err != nil { - panic(err) - } + proj.addSubqueryExpr(org, newExpr, subqs...) } } proj.Source = sqc.getRootOperator(src, nil) return proj } -func newStarProjection(src ops.Operator, qp *QueryProjection) *Projection { +func newStarProjection(src Operator, qp *QueryProjection) *Projection { cols := sqlparser.SelectExprs{} for _, expr := range qp.SelectExprs { diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index 4f096e1ac65..f7de09c4857 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -41,7 +41,7 @@ type InfoSchemaRouting struct { Table *QueryTable } -func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.SysTableTableSchema = nil for _, expr := range isr.SysTableTableSchema { eexpr, err := evalengine.Translate(expr, &evalengine.Config{ @@ -49,7 +49,7 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext ResolveColumn: NotImplementedSchemaInfoResolver, }) if err != nil { - return err + panic(err) } rp.SysTableTableSchema = append(rp.SysTableTableSchema, eexpr) } @@ -61,12 +61,11 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ 
*plancontext.PlanningContext ResolveColumn: NotImplementedSchemaInfoResolver, }) if err != nil { - return err + panic(err) } rp.SysTableTableName[k] = eexpr } - return nil } func (isr *InfoSchemaRouting) Clone() Routing { @@ -77,10 +76,10 @@ func (isr *InfoSchemaRouting) Clone() Routing { } } -func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { isTableSchema, bvName, out := extractInfoSchemaRoutingPredicate(ctx, expr) if out == nil { - return isr, nil + return isr } if isr.SysTableTableName == nil { @@ -92,14 +91,14 @@ func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContex if sqlparser.Equals.Expr(out, s) { // we already have this expression in the list // stating it again does not add value - return isr, nil + return isr } } isr.SysTableTableSchema = append(isr.SysTableTableSchema, out) } else { isr.SysTableTableName[bvName] = out } - return isr, nil + return isr } func (isr *InfoSchemaRouting) Cost() int { diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go index fa2f60dcecc..f783ac7a5bc 100644 --- a/go/vt/vtgate/planbuilder/operators/insert.go +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -24,7 +24,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -79,13 +78,13 @@ func (i *Insert) ShortDescription() string { return i.VTable.String() } -func (i *Insert) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (i *Insert) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*Insert)(nil) +var _ Operator = 
(*Insert)(nil) -func (i *Insert) Clone([]ops.Operator) ops.Operator { +func (i *Insert) Clone([]Operator) Operator { return &Insert{ VTable: i.VTable, AST: i.AST, @@ -105,16 +104,10 @@ func (i *Insert) Statement() sqlparser.Statement { return i.AST } -func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) - if err != nil { - return nil, err - } +func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) Operator { + tableInfo, qt := createQueryTableForDML(ctx, ins.Table, nil) - vTbl, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") - if err != nil { - return nil, err - } + vTbl, routing := buildVindexTableForDML(ctx, tableInfo, qt, "insert") deleteBeforeInsert := false if ins.Action == sqlparser.ReplaceAct && @@ -125,37 +118,27 @@ func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.I deleteBeforeInsert = true } - insOp, err := checkAndCreateInsertOperator(ctx, ins, vTbl, routing) - if err != nil { - return nil, err - } + insOp := checkAndCreateInsertOperator(ctx, ins, vTbl, routing) if !deleteBeforeInsert { - return insOp, nil + return insOp } rows, isRows := ins.Rows.(sqlparser.Values) if !isRows { - return nil, vterrors.VT12001("REPLACE INTO using select statement") + panic(vterrors.VT12001("REPLACE INTO using select statement")) } pkCompExpr := pkCompExpression(vTbl, ins, rows) - uniqKeyCompExprs, err := uniqKeyCompExpressions(vTbl, ins, rows) - if err != nil { - return nil, err - } - + uniqKeyCompExprs := uniqKeyCompExpressions(vTbl, ins, rows) whereExpr := getWhereCondExpr(append(uniqKeyCompExprs, pkCompExpr)) delStmt := &sqlparser.Delete{ TableExprs: sqlparser.TableExprs{sqlparser.CloneRefOfAliasedTableExpr(ins.Table)}, Where: sqlparser.NewWhere(sqlparser.WhereClause, whereExpr), } - delOp, err := createOpFromStmt(ctx, delStmt, false, "") - if err != nil { - 
return nil, err - } - return &Sequential{Sources: []ops.Operator{delOp, insOp}}, nil + delOp := createOpFromStmt(ctx, delStmt, false, "") + return &Sequential{Sources: []Operator{delOp, insOp}} } func getWhereCondExpr(compExprs []*sqlparser.ComparisonExpr) sqlparser.Expr { @@ -229,10 +212,10 @@ type uComp struct { def sqlparser.Expr } -func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) (comps []*sqlparser.ComparisonExpr, err error) { +func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) (comps []*sqlparser.ComparisonExpr) { noOfUniqKeys := len(vTbl.UniqueKeys) if noOfUniqKeys == 0 { - return nil, nil + return nil } type uIdx struct { @@ -248,10 +231,7 @@ func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sq skipKey := false for _, expr := range uniqKey { var offsets []uComp - offsets, skipKey, err = createUniqueKeyComp(ins, expr, vTbl) - if err != nil { - return nil, err - } + offsets, skipKey = createUniqueKeyComp(ins, expr, vTbl) if skipKey { break } @@ -293,10 +273,10 @@ func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sq for i, valTuple := range allValTuples { compExprs = append(compExprs, sqlparser.NewComparisonExpr(sqlparser.InOp, allColTuples[i], valTuple, nil)) } - return compExprs, nil + return compExprs } -func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vindexes.Table) ([]uComp, bool, error) { +func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vindexes.Table) ([]uComp, bool) { col, isCol := expr.(*sqlparser.ColName) if isCol { var def sqlparser.Expr @@ -305,13 +285,13 @@ func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vinde def = findDefault(vTbl, col.Name) if def == nil { // default value is empty, nothing to compare as it will always be false. 
- return nil, true, nil + return nil, true } } - return []uComp{{idx, def}}, false, nil + return []uComp{{idx, def}}, false } var offsets []uComp - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { col, ok := node.(*sqlparser.ColName) if !ok { return true, nil @@ -328,14 +308,11 @@ func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vinde offsets = append(offsets, uComp{idx, def}) return false, nil }, expr) - return offsets, false, err + return offsets, false } -func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { - insOp, err := createInsertOperator(ctx, ins, vTbl, routing) - if err != nil { - return nil, err - } +func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) Operator { + insOp := createInsertOperator(ctx, ins, vTbl, routing) if ins.Comments != nil { insOp = &LockAndComment{ @@ -347,31 +324,31 @@ func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlpars // Find the foreign key mode and for unmanaged foreign-key-mode, we don't need to do anything. 
ksMode, err := ctx.VSchema.ForeignKeyMode(vTbl.Keyspace.Name) if err != nil { - return nil, err + return nil } if ksMode != vschemapb.Keyspace_managed { - return insOp, nil + return insOp } parentFKs := ctx.SemTable.GetParentForeignKeysList() childFks := ctx.SemTable.GetChildForeignKeysList() if len(parentFKs) > 0 { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } if len(childFks) > 0 { if ins.Action == sqlparser.ReplaceAct { - return nil, vterrors.VT12001("REPLACE INTO with foreign keys") + panic(vterrors.VT12001("REPLACE INTO with foreign keys")) } if len(ins.OnDup) > 0 { - return nil, vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys") + panic(vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys")) } } - return insOp, nil + return insOp } -func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { +func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) Operator { if _, target := routing.(*TargetedRouting); target { - return nil, vterrors.VT09017("INSERT with a target destination is not allowed") + panic(vterrors.VT09017("INSERT with a target destination is not allowed")) } insOp := &Insert{ @@ -390,15 +367,12 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I if vTbl.ColumnListAuthoritative { insStmt = populateInsertColumnlist(insStmt, vTbl) } else { - return nil, vterrors.VT09004() + panic(vterrors.VT09004()) } } // modify column list or values for autoincrement column. - autoIncGen, err := modifyForAutoinc(ctx, insStmt, vTbl) - if err != nil { - return nil, err - } + autoIncGen := modifyForAutoinc(ctx, insStmt, vTbl) insOp.AutoIncrement = autoIncGen // set insert ignore. 
@@ -407,24 +381,27 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I insOp.ColVindexes = getColVindexes(insOp) switch rows := insStmt.Rows.(type) { case sqlparser.Values: - route.Source, err = insertRowsPlan(ctx, insOp, insStmt, rows) - if err != nil { - return nil, err - } + route.Source = insertRowsPlan(ctx, insOp, insStmt, rows) case sqlparser.SelectStatement: return insertSelectPlan(ctx, insOp, route, insStmt, rows) } - return route, nil + return route } -func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp *Route, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*InsertSelection, error) { +func insertSelectPlan( + ctx *plancontext.PlanningContext, + insOp *Insert, + routeOp *Route, + ins *sqlparser.Insert, + sel sqlparser.SelectStatement, +) *InsertSelection { if columnMismatch(insOp.AutoIncrement, ins, sel) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } selOp, err := PlanQuery(ctx, sel) if err != nil { - return nil, err + panic(err) } // output of the select plan will be used to insert rows into the table. @@ -449,28 +426,24 @@ func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp * } if len(insOp.ColVindexes) == 0 { - return insertSelect, nil + return insertSelect } colVindexes := insOp.ColVindexes vv := make([][]int, len(colVindexes)) for idx, colVindex := range colVindexes { for _, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } - + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) colNum := findColumn(ins, col) // sharding column values should be provided in the insert. 
if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) + panic(vterrors.VT09003(col)) } vv[idx] = append(vv[idx], colNum) } } insOp.VindexValueOffset = vv - return insertSelect, nil + return insertSelect } func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { @@ -498,15 +471,15 @@ func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectSt return false } -func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { +func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) *Insert { for _, row := range rows { if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } } if len(insOp.ColVindexes) == 0 { - return insOp, nil + return insOp } colVindexes := insOp.ColVindexes @@ -514,10 +487,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar for vIdx, colVindex := range colVindexes { routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) for colIdx, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) colNum, _ := findOrAddColumn(ins, col) for rowNum, row := range rows { @@ -526,7 +496,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, err + panic(err) } routeValues[vIdx][colIdx][rowNum] = innerpv } @@ -543,7 +513,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar } } insOp.VindexValues = routeValues - return insOp, nil + return insOp } func valuesProvided(rows sqlparser.InsertRows) bool { @@ -571,18 +541,17 @@ func 
getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { return } -func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { +func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) { for _, assignment := range setClauses { if col.Equal(assignment.Name.Name) { valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) // update on duplicate key is changing the vindex column, not supported. if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return vterrors.VT12001("DML cannot update vindex column") + panic(vterrors.VT12001("DML cannot update vindex column")) } - return nil + return } } - return nil } // findOrAddColumn finds the position of a column in the insert. If it's @@ -625,9 +594,9 @@ func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sql // modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. // For row values cases, bind variable names are generated using baseName. 
-func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { +func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) *Generate { if vTable.AutoIncrement == nil { - return nil, nil + return nil } gen := &Generate{ Keyspace: vTable.AutoIncrement.Sequence.Keyspace, @@ -642,7 +611,7 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v autoIncValues := make(sqlparser.ValTuple, 0, len(rows)) for rowNum, row := range rows { if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } // Support the DEFAULT keyword by treating it as null if _, ok := row[colNum].(*sqlparser.Default); ok { @@ -657,8 +626,8 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, err + panic(err) } } - return gen, nil + return gen } diff --git a/go/vt/vtgate/planbuilder/operators/insert_selection.go b/go/vt/vtgate/planbuilder/operators/insert_selection.go index 5ae49ee2c55..70bda0a990a 100644 --- a/go/vt/vtgate/planbuilder/operators/insert_selection.go +++ b/go/vt/vtgate/planbuilder/operators/insert_selection.go @@ -17,15 +17,14 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // InsertSelection operator represents an INSERT into SELECT FROM query. // It holds the operators for running the selection and insertion. type InsertSelection struct { - Select ops.Operator - Insert ops.Operator + Select Operator + Insert Operator // ForceNonStreaming when true, select first then insert, this is to avoid locking rows by select for insert. 
ForceNonStreaming bool @@ -34,7 +33,7 @@ type InsertSelection struct { noPredicates } -func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { +func (is *InsertSelection) Clone(inputs []Operator) Operator { return &InsertSelection{ Select: inputs[0], Insert: inputs[1], @@ -42,11 +41,11 @@ func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { } } -func (is *InsertSelection) Inputs() []ops.Operator { - return []ops.Operator{is.Select, is.Insert} +func (is *InsertSelection) Inputs() []Operator { + return []Operator{is.Select, is.Insert} } -func (is *InsertSelection) SetInputs(inputs []ops.Operator) { +func (is *InsertSelection) SetInputs(inputs []Operator) { is.Select = inputs[0] is.Insert = inputs[1] } @@ -58,8 +57,8 @@ func (is *InsertSelection) ShortDescription() string { return "" } -func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*InsertSelection)(nil) +var _ Operator = (*InsertSelection)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 1d50a688df4..35bf26f9793 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -19,24 +19,22 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // Join represents a join. If we have a predicate, this is an inner join. 
If no predicate exists, it is a cross join type Join struct { - LHS, RHS ops.Operator + LHS, RHS Operator Predicate sqlparser.Expr LeftJoin bool noColumns } -var _ ops.Operator = (*Join)(nil) +var _ Operator = (*Join)(nil) // Clone implements the Operator interface -func (j *Join) Clone(inputs []ops.Operator) ops.Operator { +func (j *Join) Clone(inputs []Operator) Operator { clone := *j clone.LHS = inputs[0] clone.RHS = inputs[1] @@ -48,30 +46,30 @@ func (j *Join) Clone(inputs []ops.Operator) ops.Operator { } } -func (j *Join) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (j *Join) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (j *Join) Inputs() []ops.Operator { - return []ops.Operator{j.LHS, j.RHS} +func (j *Join) Inputs() []Operator { + return []Operator{j.LHS, j.RHS} } // SetInputs implements the Operator interface -func (j *Join) SetInputs(ops []ops.Operator) { +func (j *Join) SetInputs(ops []Operator) { j.LHS, j.RHS = ops[0], ops[1] } -func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (j *Join) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { if j.LeftJoin { // we can't merge outer joins into a single QG - return j, rewrite.SameTree, nil + return j, NoRewrite } lqg, lok := j.LHS.(*QueryGraph) rqg, rok := j.RHS.(*QueryGraph) if !lok || !rok { - return j, rewrite.SameTree, nil + return j, NoRewrite } newOp := &QueryGraph{ @@ -82,23 +80,23 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite if j.Predicate != nil { newOp.collectPredicate(ctx, j.Predicate) } - return newOp, rewrite.NewTree("merge querygraphs into a single one"), nil + return newOp, Rewrote("merge querygraphs into a single one") } -func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { +func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs 
Operator) Operator { if tableExpr.Join == sqlparser.RightJoinType { lhs, rhs = rhs, lhs } subq, _ := getSubQuery(tableExpr.Condition.On) if subq != nil { - return nil, vterrors.VT12001("subquery in outer join predicate") + panic(vterrors.VT12001("subquery in outer join predicate")) } predicate := tableExpr.Condition.On sqlparser.RemoveKeyspaceFromColName(predicate) - return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}, nil + return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate} } -func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Operator { +func createJoin(ctx *plancontext.PlanningContext, LHS, RHS Operator) Operator { lqg, lok := LHS.(*QueryGraph) rqg, rok := RHS.(*QueryGraph) if lok && rok { @@ -112,7 +110,7 @@ func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Ope return &Join{LHS: LHS, RHS: RHS} } -func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { +func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { op := createJoin(ctx, lhs, rhs) sqc := &SubQueryBuilder{} outerID := TableID(op) @@ -120,37 +118,34 @@ func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.Join sqlparser.RemoveKeyspaceFromColName(joinPredicate) exprs := sqlparser.SplitAndExpression(nil, joinPredicate) for _, pred := range exprs { - subq, err := sqc.handleSubquery(ctx, pred, outerID) - if err != nil { - return nil, err - } + subq := sqc.handleSubquery(ctx, pred, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, pred) } - return sqc.getRootOperator(op, nil), nil + return sqc.getRootOperator(op, nil) } -func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, j, 
expr, false, newFilter) } var _ JoinOp = (*Join)(nil) -func (j *Join) GetLHS() ops.Operator { +func (j *Join) GetLHS() Operator { return j.LHS } -func (j *Join) GetRHS() ops.Operator { +func (j *Join) GetRHS() Operator { return j.RHS } -func (j *Join) SetLHS(operator ops.Operator) { +func (j *Join) SetLHS(operator Operator) { j.LHS = operator } -func (j *Join) SetRHS(operator ops.Operator) { +func (j *Join) SetRHS(operator Operator) { j.RHS = operator } diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go index 52c9c4e5837..dfd89013e94 100644 --- a/go/vt/vtgate/planbuilder/operators/join_merging.go +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -21,14 +21,13 @@ import ( "reflect" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeJoinInputs checks whether two operators can be merged into a single one. // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. 
-func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) *Route { +func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m merger) *Route { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { return nil @@ -66,7 +65,7 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, jo } } -func prepareInputRoutes(lhs ops.Operator, rhs ops.Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { +func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) if lhsRoute == nil || rhsRoute == nil { return nil, nil, nil, nil, 0, 0, false diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go index ad61a6c5a00..266b9b8288f 100644 --- a/go/vt/vtgate/planbuilder/operators/joins.go +++ b/go/vt/vtgate/planbuilder/operators/joins.go @@ -18,17 +18,16 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type JoinOp interface { - ops.Operator - GetLHS() ops.Operator - GetRHS() ops.Operator - SetLHS(ops.Operator) - SetRHS(ops.Operator) + Operator + GetLHS() Operator + GetRHS() Operator + SetLHS(Operator) + SetRHS(Operator) MakeInner() IsInner() bool AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) @@ -39,8 +38,8 @@ func AddPredicate( join JoinOp, expr sqlparser.Expr, joinPredicates bool, - newFilter func(ops.Operator, sqlparser.Expr) ops.Operator, -) ops.Operator { + newFilter func(Operator, sqlparser.Expr) Operator, +) Operator { deps := ctx.SemTable.RecursiveDeps(expr) switch { case 
deps.IsSolvedBy(TableID(join.GetLHS())): diff --git a/go/vt/vtgate/planbuilder/operators/limit.go b/go/vt/vtgate/planbuilder/operators/limit.go index a6ea925b135..1ba6b61149d 100644 --- a/go/vt/vtgate/planbuilder/operators/limit.go +++ b/go/vt/vtgate/planbuilder/operators/limit.go @@ -18,12 +18,11 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Limit struct { - Source ops.Operator + Source Operator AST *sqlparser.Limit // Pushed marks whether the limit has been pushed down to the inputs but still need to keep the operator around. @@ -32,22 +31,22 @@ type Limit struct { Pushed bool } -func (l *Limit) Clone(inputs []ops.Operator) ops.Operator { +func (l *Limit) Clone(inputs []Operator) Operator { return &Limit{ Source: inputs[0], AST: sqlparser.CloneRefOfLimit(l.AST), } } -func (l *Limit) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *Limit) Inputs() []Operator { + return []Operator{l.Source} } -func (l *Limit) SetInputs(operators []ops.Operator) { +func (l *Limit) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -68,7 +67,7 @@ func (l *Limit) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return l.Source.GetSelectExprs(ctx) } -func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/misc_routing.go b/go/vt/vtgate/planbuilder/operators/misc_routing.go index 81301f975b4..575aa7b4e9a 100644 --- a/go/vt/vtgate/planbuilder/operators/misc_routing.go +++ 
b/go/vt/vtgate/planbuilder/operators/misc_routing.go @@ -64,10 +64,9 @@ var ( _ Routing = (*SequenceRouting)(nil) ) -func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace rp.TargetDestination = tr.TargetDestination - return nil } func (tr *TargetedRouting) Clone() Routing { @@ -75,8 +74,8 @@ func (tr *TargetedRouting) Clone() Routing { return &newTr } -func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) (Routing, error) { - return tr, nil +func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) Routing { + return tr } func (tr *TargetedRouting) Cost() int { @@ -91,17 +90,16 @@ func (tr *TargetedRouting) Keyspace() *vindexes.Keyspace { return tr.keyspace } -func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = n.keyspace - return nil } func (n *NoneRouting) Clone() Routing { return n } -func (n *NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return n, nil +func (n *NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return n } func (n *NoneRouting) Cost() int { @@ -116,9 +114,8 @@ func (n *NoneRouting) Keyspace() *vindexes.Keyspace { return n.keyspace } -func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = rr.keyspace - return nil } func (rr *AnyShardRouting) Clone() Routing { @@ -128,8 +125,8 @@ func (rr *AnyShardRouting) Clone() 
Routing { } } -func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return rr, nil +func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return rr } func (rr *AnyShardRouting) Cost() int { @@ -159,16 +156,14 @@ func (rr *AnyShardRouting) AlternateInKeyspace(keyspace *vindexes.Keyspace) *Rou return nil } -func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) error { - return nil -} +func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) {} func (dr *DualRouting) Clone() Routing { return &DualRouting{} } -func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return dr, nil +func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return dr } func (dr *DualRouting) Cost() int { @@ -183,18 +178,17 @@ func (dr *DualRouting) Keyspace() *vindexes.Keyspace { return nil } -func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Opcode = engine.Next rp.Keyspace = sr.keyspace - return nil } func (sr *SequenceRouting) Clone() Routing { return &SequenceRouting{keyspace: sr.keyspace} } -func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return sr, nil +func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return sr } func (sr *SequenceRouting) Cost() int { diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go index d2fc266790c..6de7a2be2b0 100644 --- a/go/vt/vtgate/planbuilder/operators/offset_planning.go +++ 
b/go/vt/vtgate/planbuilder/operators/offset_planning.go @@ -21,36 +21,30 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) // planOffsets will walk the tree top down, adding offset information to columns in the tree for use in further optimization, -func planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { +func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator { type offsettable interface { - planOffsets(ctx *plancontext.PlanningContext) ops.Operator + planOffsets(ctx *plancontext.PlanningContext) Operator } - visitor := func(in ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - var err error + visitor := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { switch op := in.(type) { case *Horizon: - return nil, nil, vterrors.VT13001(fmt.Sprintf("should not see %T here", in)) + panic(vterrors.VT13001(fmt.Sprintf("should not see %T here", in))) case offsettable: newOp := op.planOffsets(ctx) if newOp != nil { - return newOp, rewrite.NewTree("new operator after offset planning"), nil + return newOp, Rewrote("new operator after offset planning") } } - if err != nil { - return nil, nil, err - } - return in, rewrite.SameTree, nil + return in, NoRewrite } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } func fetchByOffset(e sqlparser.SQLNode) bool { @@ -63,7 +57,7 @@ func fetchByOffset(e sqlparser.SQLNode) bool { } // useOffsets rewrites an expression to use values from the input -func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) sqlparser.Expr { +func useOffsets(ctx *plancontext.PlanningContext, expr 
sqlparser.Expr, op Operator) sqlparser.Expr { var exprOffset *sqlparser.Offset in := op.Inputs()[0] @@ -93,17 +87,17 @@ func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Op // addColumnsToInput adds columns needed by an operator to its input. // This happens only when the filter expression can be retrieved as an offset from the underlying mysql. -func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addColumnsToInput(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { filter, ok := in.(*Filter) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } proj, areOnTopOfProj := filter.Source.(selectExpressions) if !areOnTopOfProj { // not much we can do here - return in, rewrite.SameTree, nil + return in, NoRewrite } addedColumns := false found := func(expr sqlparser.Expr, i int) {} @@ -119,22 +113,22 @@ func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops _ = sqlparser.CopyOnRewrite(expr, visitor, nil, ctx.SemTable.CopySemanticInfo) } if addedColumns { - return in, rewrite.NewTree("added columns because filter needs it"), nil + return in, Rewrote("added columns because filter needs it") } - return in, rewrite.SameTree, nil + return in, NoRewrite } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } // addColumnsToInput adds columns needed by an operator to its input. // This happens only when the filter expression can be retrieved as an offset from the underlying mysql. 
-func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func pullDistinctFromUNION(_ *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { union, ok := in.(*Union) if !ok || !union.distinct { - return in, rewrite.SameTree, nil + return in, NoRewrite } union.distinct = false @@ -143,10 +137,10 @@ func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (o Required: true, Source: union, } - return distinct, rewrite.NewTree("pulled out DISTINCT from union"), nil + return distinct, Rewrote("pulled out DISTINCT from union") } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } func getOffsetRewritingVisitor( diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go index b165c5345b0..d639643dda1 100644 --- a/go/vt/vtgate/planbuilder/operators/operator.go +++ b/go/vt/vtgate/planbuilder/operators/operator.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2022 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -36,138 +36,61 @@ The operators go through a few phases while planning: package operators import ( - "fmt" - - "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( - // helper type that implements Inputs() returning nil - noInputs struct{} + // Operator forms the tree of operators, representing the declarative query provided. 
+ // The operator tree is no actually runnable, it's an intermediate representation used + // while query planning + // The mental model are operators that pull data from each other, the root being the + // full query output, and the leaves are most often `Route`s, representing communication + // with one or more shards. We want to push down as much work as possible under these Routes + Operator interface { + // Clone will return a copy of this operator, protected so changed to the original will not impact the clone + Clone(inputs []Operator) Operator - // helper type that implements AddColumn() returning an error - noColumns struct{} + // Inputs returns the inputs for this operator + Inputs() []Operator - // helper type that implements AddPredicate() returning an error - noPredicates struct{} -) + // SetInputs changes the inputs for this op + SetInputs([]Operator) -// PlanQuery creates a query plan for a given SQL statement -func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result ops.Operator, err error) { - defer PanicHandler(&err) + // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. 
+ // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, + // where data is fetched from the LHS of the join to be used in the evaluation on the RHS + // TODO: we should remove this and replace it with rewriters + AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - op, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } + AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - if rewrite.DebugOperatorTree { - fmt.Println("Initial tree:") - fmt.Println(ops.ToTree(op)) - } + FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - if op, err = compact(ctx, op); err != nil { - return nil, err - } + GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr + GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs - if err = checkValid(op); err != nil { - return nil, err - } - - if op, err = planQuery(ctx, op); err != nil { - return nil, err - } + ShortDescription() string - _, isRoute := op.(*Route) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - // If we got here, we don't have a single shard plan - return nil, ctx.SemTable.NotSingleRouteErr + GetOrdering(ctx *plancontext.PlanningContext) []OrderBy } - return op, err -} + // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. 
+ OrderBy struct { + Inner *sqlparser.Order -func PanicHandler(err *error) { - if r := recover(); r != nil { - badness, ok := r.(error) - if !ok { - panic(r) - } - - *err = badness - } -} - -// Inputs implements the Operator interface -func (noInputs) Inputs() []ops.Operator { - return nil -} - -// SetInputs implements the Operator interface -func (noInputs) SetInputs(ops []ops.Operator) { - if len(ops) > 0 { - panic("the noInputs operator does not have inputs") - } -} - -// AddColumn implements the Operator interface -func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -// AddPredicate implements the Operator interface -func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) ops.Operator { - panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) -} - -// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us -func tryTruncateColumnsAt(op ops.Operator, truncateAt int) bool { - type columnTruncator interface { - setTruncateColumnCount(offset int) - } - - truncator, ok := op.(columnTruncator) - if ok { - truncator.setTruncateColumnCount(truncateAt) - return true + // See GroupBy#SimplifiedExpr for more details about this + SimplifiedExpr sqlparser.Expr } +) - switch op := op.(type) { - case *Limit: - return tryTruncateColumnsAt(op.Source, truncateAt) - case *SubQuery: - for _, offset := range op.Vars { - if 
offset >= truncateAt { - return false - } - } - return tryTruncateColumnsAt(op.Outer, truncateAt) - default: - return false +// Map takes in a mapping function and applies it to both the expression in OrderBy. +func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { + return OrderBy{ + Inner: &sqlparser.Order{ + Expr: mappingFunc(ob.Inner.Expr), + Direction: ob.Inner.Direction, + }, + SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), } } - -func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op ops.Operator) sqlparser.SelectExprs { - columns := op.GetColumns(ctx) - selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { - return from - }) - return selExprs -} diff --git a/go/vt/vtgate/planbuilder/operators/operator_funcs.go b/go/vt/vtgate/planbuilder/operators/operator_funcs.go index 7f7aaff29c5..cc3007438fa 100644 --- a/go/vt/vtgate/planbuilder/operators/operator_funcs.go +++ b/go/vt/vtgate/planbuilder/operators/operator_funcs.go @@ -21,13 +21,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // RemovePredicate is used when we turn a predicate into a plan operator, // and the predicate needs to be removed as an AST construct -func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { +func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) (Operator, error) { switch op := op.(type) { case *Route: newSrc, err := RemovePredicate(ctx, expr, op.Source) diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go deleted file mode 100644 index 1117b947814..00000000000 --- a/go/vt/vtgate/planbuilder/operators/ops/op.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ops - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -type ( - // Operator forms the tree of operators, representing the declarative query provided. - // The operator tree is no actually runnable, it's an intermediate representation used - // while query planning - // The mental model are operators that pull data from each other, the root being the - // full query output, and the leaves are most often `Route`s, representing communication - // with one or more shards. We want to push down as much work as possible under these Routes - Operator interface { - // Clone will return a copy of this operator, protected so changed to the original will not impact the clone - Clone(inputs []Operator) Operator - - // Inputs returns the inputs for this operator - Inputs() []Operator - - // SetInputs changes the inputs for this op - SetInputs([]Operator) - - // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. 
- // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, - // where data is fetched from the LHS of the join to be used in the evaluation on the RHS - // TODO: we should remove this and replace it with rewriters - AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - - AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - - FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - - GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr - GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs - - ShortDescription() string - - GetOrdering(ctx *plancontext.PlanningContext) []OrderBy - } - - // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. - OrderBy struct { - Inner *sqlparser.Order - - // See GroupBy#SimplifiedExpr for more details about this - SimplifiedExpr sqlparser.Expr - } -) - -// Map takes in a mapping function and applies it to both the expression in OrderBy. 
-func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { - return OrderBy{ - Inner: &sqlparser.Order{ - Expr: mappingFunc(ob.Inner.Expr), - Direction: ob.Inner.Direction, - }, - SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), - } -} diff --git a/go/vt/vtgate/planbuilder/operators/ordering.go b/go/vt/vtgate/planbuilder/operators/ordering.go index 66436f6a47d..bc088ca2220 100644 --- a/go/vt/vtgate/planbuilder/operators/ordering.go +++ b/go/vt/vtgate/planbuilder/operators/ordering.go @@ -22,20 +22,19 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Ordering struct { - Source ops.Operator + Source Operator Offset []int WOffset []int - Order []ops.OrderBy + Order []OrderBy ResultColumns int } -func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { +func (o *Ordering) Clone(inputs []Operator) Operator { return &Ordering{ Source: inputs[0], Offset: slices.Clone(o.Offset), @@ -45,15 +44,15 @@ func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { } } -func (o *Ordering) Inputs() []ops.Operator { - return []ops.Operator{o.Source} +func (o *Ordering) Inputs() []Operator { + return []Operator{o.Source} } -func (o *Ordering) SetInputs(operators []ops.Operator) { +func (o *Ordering) SetInputs(operators []Operator) { o.Source = operators[0] } -func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { o.Source = o.Source.AddPredicate(ctx, expr) return o } @@ -74,11 +73,11 @@ func (o *Ordering) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Se return o.Source.GetSelectExprs(ctx) } -func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []OrderBy { return 
o.Order } -func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) Operator { for _, order := range o.Order { offset := o.Source.AddColumn(ctx, true, false, aeWrap(order.SimplifiedExpr)) o.Offset = append(o.Offset, offset) @@ -96,7 +95,7 @@ func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { } func (o *Ordering) ShortDescription() string { - ordering := slice.Map(o.Order, func(o ops.OrderBy) string { + ordering := slice.Map(o.Order, func(o OrderBy) string { return sqlparser.String(o.SimplifiedExpr) }) return strings.Join(ordering, ", ") diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go index 557124e9320..8a47507a526 100644 --- a/go/vt/vtgate/planbuilder/operators/phases.go +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -20,8 +20,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -79,7 +77,7 @@ func (p Phase) shouldRun(s semantics.QuerySignature) bool { } } -func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func (p Phase) act(ctx *plancontext.PlanningContext, op Operator) Operator { switch p { case pullDistinctFromUnion: return pullDistinctFromUNION(ctx, op) @@ -90,9 +88,9 @@ func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Opera case cleanOutPerfDistinct: return removePerformanceDistinctAboveRoute(ctx, op) case subquerySettling: - return settleSubqueries(ctx, op), nil + return settleSubqueries(ctx, op) default: - return op, nil + return op } } @@ -115,51 +113,47 @@ func (p *phaser) next(ctx *plancontext.PlanningContext) Phase { } } -func 
removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { - return rewrite.BottomUp(op, TableID, func(innerOp ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { +func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op Operator) Operator { + return BottomUp(op, TableID, func(innerOp Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { d, ok := innerOp.(*Distinct) if !ok || d.Required { - return innerOp, rewrite.SameTree, nil + return innerOp, NoRewrite } - return d.Source, rewrite.NewTree("removed distinct not required that was not pushed under route"), nil + return d.Source, Rewrote("removed distinct not required that was not pushed under route") }, stopAtRoute) } -func enableDelegateAggregation(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func enableDelegateAggregation(ctx *plancontext.PlanningContext, op Operator) Operator { return addColumnsToInput(ctx, op) } // addOrderingForAllAggregations is run we have pushed down Aggregators as far down as possible. 
-func addOrderingForAllAggregations(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addOrderingForAllAggregations(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { aggrOp, ok := in.(*Aggregator) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } - requireOrdering, err := needsOrdering(ctx, aggrOp) - if err != nil { - return nil, nil, err - } - - var res *rewrite.ApplyResult + requireOrdering := needsOrdering(ctx, aggrOp) + var res *ApplyResult if requireOrdering { addOrderingFor(aggrOp) - res = rewrite.NewTree("added ordering before aggregation") + res = Rewrote("added ordering before aggregation") } - return in, res, nil + return in, res } - return rewrite.BottomUp(root, TableID, visitor, stopAtRoute) + return BottomUp(root, TableID, visitor, stopAtRoute) } func addOrderingFor(aggrOp *Aggregator) { - orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) ops.OrderBy { + orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) OrderBy { return from.AsOrderBy() }) if aggrOp.DistinctExpr != nil { - orderBys = append(orderBys, ops.OrderBy{ + orderBys = append(orderBys, OrderBy{ Inner: &sqlparser.Order{ Expr: aggrOp.DistinctExpr, }, @@ -172,7 +166,7 @@ func addOrderingFor(aggrOp *Aggregator) { } } -func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, error) { +func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) bool { requiredOrder := slice.Map(in.Grouping, func(from GroupBy) sqlparser.Expr { return from.SimplifiedExpr }) @@ -180,44 +174,44 @@ func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, erro requiredOrder = append(requiredOrder, in.DistinctExpr) } if len(requiredOrder) == 0 { - return false, nil + return false } srcOrdering := 
in.Source.GetOrdering(ctx) if len(srcOrdering) < len(requiredOrder) { - return true, nil + return true } for idx, gb := range requiredOrder { if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb) { - return true, nil + return true } } - return false, nil + return false } -func addGroupByOnRHSOfJoin(root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addGroupByOnRHSOfJoin(root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { join, ok := in.(*ApplyJoin) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } return addLiteralGroupingToRHS(join) } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } -func addLiteralGroupingToRHS(in *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { - _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { +func addLiteralGroupingToRHS(in *ApplyJoin) (Operator, *ApplyResult) { + _ = Visit(in.RHS, func(op Operator) error { aggr, isAggr := op.(*Aggregator) if !isAggr { return nil } if len(aggr.Grouping) == 0 { gb := sqlparser.NewIntLiteral(".0") - aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) + aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb)) } return nil }) - return in, rewrite.SameTree, nil + return in, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/plan_query.go b/go/vt/vtgate/planbuilder/operators/plan_query.go new file mode 100644 index 00000000000..811f0c8dc76 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/plan_query.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package operators contains the operators used to plan queries. +/* +The operators go through a few phases while planning: +1. Initial plan + In this first pass, we build an operator tree from the incoming parsed query. + At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause + that we can easily do join ordering on because they are all inner joins. + All the post-processing - aggregations, sorting, limit etc. are at this stage + contained in Horizon structs. We try to push these down under routes, and expand + the ones that can't be pushed down into individual operators such as Projection, + Agreggation, Limit, etc. +2. Planning + Once the initial plan has been fully built, we go through a number of phases. + recursively running rewriters on the tree in a fixed point fashion, until we've gone + over all phases and the tree has stop changing. +3. Offset planning + Now is the time to stop working with AST objects and transform remaining expressions being + used on top of vtgate to either offsets on inputs or evalengine expressions. 
+*/ +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type ( + // helper type that implements Inputs() returning nil + noInputs struct{} + + // helper type that implements AddColumn() returning an error + noColumns struct{} + + // helper type that implements AddPredicate() returning an error + noPredicates struct{} +) + +// PlanQuery creates a query plan for a given SQL statement +func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result Operator, err error) { + defer PanicHandler(&err) + + op := translateQueryToOp(ctx, stmt) + + if DebugOperatorTree { + fmt.Println("Initial tree:") + fmt.Println(ToTree(op)) + } + + op = compact(ctx, op) + if err = checkValid(op); err != nil { + return nil, err + } + + if op, err = planQuery(ctx, op); err != nil { + return nil, err + } + + _, isRoute := op.(*Route) + if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { + // If we got here, we don't have a single shard plan + return nil, ctx.SemTable.NotSingleRouteErr + } + + return op, err +} + +func PanicHandler(err *error) { + if r := recover(); r != nil { + badness, ok := r.(error) + if !ok { + panic(r) + } + + *err = badness + } +} + +// Inputs implements the Operator interface +func (noInputs) Inputs() []Operator { + return nil +} + +// SetInputs implements the Operator interface +func (noInputs) SetInputs(ops []Operator) { + if len(ops) > 0 { + panic("the noInputs operator does not have inputs") + } +} + +// AddColumn implements the Operator interface +func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) 
FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +// AddPredicate implements the Operator interface +func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) Operator { + panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) +} + +// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us +func tryTruncateColumnsAt(op Operator, truncateAt int) bool { + type columnTruncator interface { + setTruncateColumnCount(offset int) + } + + truncator, ok := op.(columnTruncator) + if ok { + truncator.setTruncateColumnCount(truncateAt) + return true + } + + switch op := op.(type) { + case *Limit: + return tryTruncateColumnsAt(op.Source, truncateAt) + case *SubQuery: + for _, offset := range op.Vars { + if offset >= truncateAt { + return false + } + } + return tryTruncateColumnsAt(op.Outer, truncateAt) + default: + return false + } +} + +func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op Operator) sqlparser.SelectExprs { + columns := op.GetColumns(ctx) + selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { + return from + }) + return selExprs +} diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go index 7e9f2d71a71..12b70d3e4ef 100644 --- a/go/vt/vtgate/planbuilder/operators/projection.go +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -25,8 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" 
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,7 +32,7 @@ import ( // Projection is used when we need to evaluate expressions on the vtgate // It uses the evalengine to accomplish its goal type Projection struct { - Source ops.Operator + Source Operator // Columns contain the expressions as viewed from the outside of this operator Columns ProjCols @@ -128,7 +126,7 @@ func newProjExprWithInner(ae *sqlparser.AliasedExpr, in sqlparser.Expr) *ProjExp } } -func newAliasedProjection(src ops.Operator) *Projection { +func newAliasedProjection(src Operator) *Projection { return &Projection{ Source: src, Columns: AliasedProjections{}, @@ -194,22 +192,19 @@ var _ selectExpressions = (*Projection)(nil) // createSimpleProjection returns a projection where all columns are offsets. // used to change the name and order of the columns in the final output -func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { +func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection { p := newAliasedProjection(src) for _, e := range qp.SelectExprs { ae, err := e.GetAliasedExpr() if err != nil { - return nil, err + panic(err) } offset := p.Source.AddColumn(ctx, true, false, ae) expr := newProjExpr(ae) expr.Info = Offset(offset) - _, err = p.addProjExpr(expr) - if err != nil { - return nil, err - } + p.addProjExpr(expr) } - return p, nil + return p } // canPush returns false if the projection has subquery expressions in it and the subqueries have not yet @@ -263,57 +258,45 @@ func (p *Projection) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Ex return -1 } -func (p *Projection) addProjExpr(pe ...*ProjExpr) (int, error) { +func (p *Projection) addProjExpr(pe ...*ProjExpr) int { ap, err := p.GetAliasedProjections() if err != nil { - return 0, err + panic(err) } offset := len(ap) ap = append(ap, pe...) 
p.Columns = ap - return offset, nil + return offset } -func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) (int, error) { +func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) int { return p.addProjExpr(newProjExprWithInner(ae, e)) } -func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) error { +func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) { pe := newProjExprWithInner(ae, expr) pe.Info = SubQueryExpression(sqs) - _, err := p.addProjExpr(pe) - return err + _ = p.addProjExpr(pe) } func (p *Projection) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _ bool) int { - column, err := p.addColumn(ctx, true, false, expr, false) - if err != nil { - panic(err) - } - return column + return p.addColumn(ctx, true, false, expr, false) } func (p *Projection) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) []int { offsets := make([]int, len(exprs)) for idx, expr := range exprs { - offset, err := p.addColumn(ctx, reuse, false, expr, false) - if err != nil { - panic(err) - } + offset := p.addColumn(ctx, reuse, false, expr, false) offsets[idx] = offset } return offsets } func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) int { - column, err := p.addColumn(ctx, reuse, addToGroupBy, ae, true) - if err != nil { - panic(err) - } - return column + return p.addColumn(ctx, reuse, addToGroupBy, ae, true) } func (p *Projection) addColumn( @@ -322,13 +305,13 @@ func (p *Projection) addColumn( addToGroupBy bool, ae *sqlparser.AliasedExpr, push bool, -) (int, error) { +) int { expr := p.DT.RewriteExpression(ctx, ae.Expr) if reuse { offset := p.FindCol(ctx, expr, false) if offset >= 0 { - return offset, nil + return offset } } @@ -337,7 +320,7 @@ func (p 
*Projection) addColumn( if ok { cols, ok := p.Columns.(AliasedProjections) if !ok { - return 0, vterrors.VT09015() + panic(vterrors.VT09015()) } for _, projExpr := range cols { if ctx.SemTable.EqualsExprWithDeps(ws.Expr, projExpr.ColExpr) { @@ -364,7 +347,7 @@ func (po Offset) expr() {} func (po *EvalEngine) expr() {} func (po SubQueryExpression) expr() {} -func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { +func (p *Projection) Clone(inputs []Operator) Operator { return &Projection{ Source: inputs[0], Columns: p.Columns, // TODO don't think we need to deep clone here @@ -373,15 +356,15 @@ func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { } } -func (p *Projection) Inputs() []ops.Operator { - return []ops.Operator{p.Source} +func (p *Projection) Inputs() []Operator { + return []Operator{p.Source} } -func (p *Projection) SetInputs(operators []ops.Operator) { +func (p *Projection) SetInputs(operators []Operator) { p.Source = operators[0] } -func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // we just pass through the predicate to our source p.Source = p.Source.AddPredicate(ctx, expr) return p @@ -412,7 +395,7 @@ func (p *Projection) GetSelectExprs(*plancontext.PlanningContext) sqlparser.Sele } } -func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return p.Source.GetOrdering(ctx) } @@ -454,10 +437,10 @@ func (p *Projection) ShortDescription() string { return strings.Join(result, ", ") } -func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return 
p, NoRewrite } // for projections that are not derived tables, we can check if it is safe to remove or not @@ -471,7 +454,7 @@ func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *r } if !needed { - return p.Source, rewrite.NewTree("removed projection only passing through the input"), nil + return p.Source, Rewrote("removed projection only passing through the input") } switch src := p.Source.(type) { @@ -480,13 +463,13 @@ func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *r case *ApplyJoin: return p.compactWithJoin(ctx, src) } - return p, rewrite.SameTree, nil + return p, NoRewrite } -func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } var newColumns []int @@ -499,49 +482,46 @@ func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *App case nil: if !ctx.SemTable.EqualsExprWithDeps(col.EvalExpr, col.ColExpr) { // the inner expression is different from what we are presenting to the outside - this means we need to evaluate - return p, rewrite.SameTree, nil + return p, NoRewrite } offset := slices.IndexFunc(join.JoinColumns, func(jc JoinColumn) bool { - return ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.ColExpr) + return ctx.SemTable.EqualsExprWithDeps(jc.Original, col.ColExpr) }) if offset < 0 { - return p, rewrite.SameTree, nil + return p, NoRewrite } if len(join.Columns) > 0 { newColumns = append(newColumns, join.Columns[offset]) } newColumnsAST = append(newColumnsAST, join.JoinColumns[offset]) default: - return p, rewrite.SameTree, nil + return p, NoRewrite } } join.Columns = newColumns join.JoinColumns = newColumnsAST - return join, rewrite.NewTree("remove projection from 
before join"), nil + return join, Rewrote("remove projection from before join") } -func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } for i, col := range ap { offset, ok := col.Info.(Offset) if !ok || int(offset) != i { - return p, rewrite.SameTree, nil + return p, NoRewrite } } columns := rb.GetColumns(ctx) - if err != nil { - return nil, nil, err - } if len(columns) == len(ap) { - return rb, rewrite.NewTree("remove projection from before route"), nil + return rb, Rewrote("remove projection from before route") } rb.ResultColumns = len(columns) - return rb, rewrite.SameTree, nil + return rb, NoRewrite } // needsEvaluation finds the expression given by this argument and checks if the inside and outside expressions match @@ -561,7 +541,7 @@ func (p *Projection) needsEvaluation(ctx *plancontext.PlanningContext, e sqlpars return false } -func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) Operator { ap, err := p.GetAliasedProjections() if err != nil { panic(err) diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go index ab140faf9b9..0994ee4402a 100644 --- a/go/vt/vtgate/planbuilder/operators/query_planning.go +++ b/go/vt/vtgate/planbuilder/operators/query_planning.go @@ -21,8 +21,6 @@ import ( "io" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,26 +33,20 @@ type ( } ) -func planQuery(ctx 
*plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { +func planQuery(ctx *plancontext.PlanningContext, root Operator) (output Operator, err error) { output, err = runPhases(ctx, root) if err != nil { return nil, err } - output, err = planOffsets(ctx, output) - if err != nil { - return nil, err - } + output = planOffsets(ctx, output) - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Println("After offset planning:") - fmt.Println(ops.ToTree(output)) + fmt.Println(ToTree(output)) } - output, err = compact(ctx, output) - if err != nil { - return nil, err - } + output = compact(ctx, output) return addTruncationOrProjectionToReturnOutput(ctx, root, output) } @@ -63,17 +55,17 @@ func planQuery(ctx *plancontext.PlanningContext, root ops.Operator) (output ops. // If we can push it under a route - done. // If we can't, we will instead expand the Horizon into // smaller operators and try to push these down as far as possible -func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Operator, err error) { +func runPhases(ctx *plancontext.PlanningContext, root Operator) (op Operator, err error) { op = root p := phaser{} for phase := p.next(ctx); phase != DONE; phase = p.next(ctx) { ctx.CurrentPhase = int(phase) - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Printf("PHASE: %s\n", phase.String()) } - op, err = phase.act(ctx, op) + op = phase.act(ctx, op) if err != nil { return nil, err } @@ -83,17 +75,17 @@ func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Oper return nil, err } - op, err = compact(ctx, op) + op = compact(ctx, op) if err != nil { return nil, err } } - return addGroupByOnRHSOfJoin(op) + return addGroupByOnRHSOfJoin(op), nil } -func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func runRewriters(ctx 
*plancontext.PlanningContext, root Operator) (Operator, error) { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch in := in.(type) { case *Horizon: return pushOrExpandHorizon(ctx, in) @@ -120,23 +112,23 @@ func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Oper case *LockAndComment: return pushLockAndComment(in) default: - return in, rewrite.SameTree, nil + return in, NoRewrite } } - return rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) + return FixedPointBottomUp(root, TableID, visitor, stopAtRoute), nil } -func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, error) { +func pushLockAndComment(l *LockAndComment) (Operator, *ApplyResult) { switch src := l.Source.(type) { case *Horizon, *QueryGraph: // we want to wait until the horizons have been pushed under a route or expanded // that way we know that we've replaced the QueryGraphs with Routes - return l, rewrite.SameTree, nil + return l, NoRewrite case *Route: src.Comments = l.Comments src.Lock = l.Lock - return src, rewrite.NewTree("put lock and comment into route"), nil + return src, Rewrote("put lock and comment into route") default: inputs := src.Inputs() for i, op := range inputs { @@ -147,23 +139,20 @@ func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, } } src.SetInputs(inputs) - return src, rewrite.NewTree("pushed down lock and comments"), nil + return src, Rewrote("pushed down lock and comments") } } -func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (Operator, *ApplyResult) { if in.IsDerived() { - newOp, result, err := pushDerived(ctx, in) - if err != nil { - return nil, nil, err - } - if result != rewrite.SameTree { - return newOp, result, nil + newOp, result := pushDerived(ctx, in) + if result != NoRewrite { + return 
newOp, result } } if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } if ctx.SemTable.QuerySignature.SubQueries { @@ -172,15 +161,12 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope rb, isRoute := in.src().(*Route) if isRoute && rb.IsSingleShard() { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } sel, isSel := in.selectStatement().(*sqlparser.Select) - qp, err := in.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := in.getQP(ctx) needsOrdering := len(qp.OrderExprs) > 0 hasHaving := isSel && sel.Having != nil @@ -193,7 +179,7 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope in.selectStatement().GetLimit() == nil if canPush { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } return expandHorizon(ctx, in) @@ -202,42 +188,42 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope func tryPushProjection( ctx *plancontext.PlanningContext, p *Projection, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { switch src := p.Source.(type) { case *Route: - return rewrite.Swap(p, src, "push projection under route") + return Swap(p, src, "push projection under route") case *ApplyJoin: if p.FromAggr || !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionInApplyJoin(ctx, p, src) case *Vindex: if !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionInVindex(ctx, p, src) case *SubQueryContainer: if !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionToOuterContainer(ctx, p, src) case *SubQuery: return pushProjectionToOuter(ctx, p, src) case *Limit: - return rewrite.Swap(p, src, "push projection under limit") + return Swap(p, src, "push projection under 
limit") default: - return p, rewrite.SameTree, nil + return p, NoRewrite } } -func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { +func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } if !reachedPhase(ctx, subquerySettling) || err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } outer := TableID(sq.Outer) @@ -248,7 +234,7 @@ func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq * } if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil + return p, NoRewrite } se, ok := pe.Info.(SubQueryExpression) @@ -258,22 +244,22 @@ func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq * } // all projections can be pushed to the outer sq.Outer, p.Source = p, sq.Outer - return sq, rewrite.NewTree("push projection into outer side of subquery"), nil + return sq, Rewrote("push projection into outer side of subquery") } func pushProjectionInVindex( ctx *plancontext.PlanningContext, p *Projection, src *Vindex, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return nil, nil, err + panic(err) } for _, pe := range ap { src.AddColumn(ctx, true, false, aeWrap(pe.EvalExpr)) } - return src, rewrite.NewTree("push projection into vindex"), nil + return src, Rewrote("push projection into vindex") } func (p *projector) add(pe *ProjExpr, col *sqlparser.IdentifierCI) { @@ -291,11 +277,11 @@ func pushProjectionInApplyJoin( ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if src.LeftJoin || err != nil { // we can't push 
down expression evaluation to the rhs if we are not sure if it will even be executed - return p, rewrite.SameTree, nil + return p, NoRewrite } lhs, rhs := &projector{}, &projector{} if p.DT != nil && len(p.DT.Columns) > 0 { @@ -309,31 +295,18 @@ func pushProjectionInApplyJoin( if p.DT != nil && idx < len(p.DT.Columns) { col = &p.DT.Columns[idx] } - err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) - if err != nil { - return nil, nil, err - } + splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) } if p.isDerived() { - err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) - if err != nil { - return nil, nil, err - } + exposeColumnsThroughDerivedTable(ctx, p, src, lhs) } // Create and update the Projection operators for the left and right children, if needed. - src.LHS, err = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) - if err != nil { - return nil, nil, err - } + src.LHS = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) + src.RHS = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) - src.RHS, err = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) - if err != nil { - return nil, nil, err - } - - return src, rewrite.NewTree("split projection to either side of join"), nil + return src, Rewrote("split projection to either side of join") } // splitProjectionAcrossJoin creates JoinPredicates for all projections, @@ -344,21 +317,16 @@ func splitProjectionAcrossJoin( lhs, rhs *projector, pe *ProjExpr, colAlias *sqlparser.IdentifierCI, -) error { +) { // Check if the current expression can reuse an existing column in the ApplyJoin. if _, found := canReuseColumn(ctx, join.JoinColumns, pe.EvalExpr, joinColumnToExpr); found { - return nil - } - - col, err := splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias) - if err != nil { - return err + return } // Add the new JoinColumn to the ApplyJoin's JoinPredicates. 
- join.JoinColumns = append(join.JoinColumns, col) - return nil + join.JoinColumns = append(join.JoinColumns, + splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias)) } func splitUnexploredExpression( @@ -367,12 +335,9 @@ func splitUnexploredExpression( lhs, rhs *projector, pe *ProjExpr, colAlias *sqlparser.IdentifierCI, -) (JoinColumn, error) { +) JoinColumn { // Get a JoinColumn for the current expression. - col, err := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) - if err != nil { - return JoinColumn{}, err - } + col := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) // Update the left and right child columns and names based on the JoinColumn type. switch { @@ -395,7 +360,7 @@ func splitUnexploredExpression( innerPE.Info = pe.Info rhs.add(innerPE, colAlias) } - return col, nil + return col } // exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table @@ -410,25 +375,25 @@ func splitUnexploredExpression( // The function iterates through each join predicate, rewriting the expressions in the predicate's // LHS expressions to include the derived table. This allows the expressions to be accessed outside // the derived table. 
-func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { +func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) { derivedTbl, err := ctx.SemTable.TableInfoFor(p.DT.TableID) if err != nil { - return err + panic(err) } derivedTblName, err := derivedTbl.Name() if err != nil { - return err + panic(err) } for _, predicate := range src.JoinPredicates { for idx, bve := range predicate.LHSExprs { expr := bve.Expr tbl, err := ctx.SemTable.TableInfoForExpr(expr) if err != nil { - return err + panic(err) } tblName, err := tbl.Name() if err != nil { - return err + panic(err) } expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) @@ -445,7 +410,6 @@ func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Proje lhs.add(projExpr, colAlias) } } - return nil } // prefixColNames adds qualifier prefixes to all ColName:s. @@ -462,17 +426,14 @@ func prefixColNames(ctx *plancontext.PlanningContext, tblName sqlparser.TableNam func createProjectionWithTheseColumns( ctx *plancontext.PlanningContext, - src ops.Operator, + src Operator, p *projector, dt *DerivedTable, -) (ops.Operator, error) { +) Operator { if len(p.columns) == 0 { - return src, nil - } - proj, err := createProjection(ctx, src) - if err != nil { - return nil, err + return src } + proj := createProjection(ctx, src) proj.Columns = AliasedProjections(p.columns) if dt != nil { kopy := *dt @@ -480,42 +441,42 @@ func createProjectionWithTheseColumns( proj.DT = &kopy } - return proj, nil + return proj } -func tryPushLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushLimit(in *Limit) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Route: return tryPushingDownLimitInRoute(in, src) case *Aggregator: - return in, rewrite.SameTree, nil + return in, NoRewrite default: return setUpperLimit(in) } } -func 
tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushingDownLimitInRoute(in *Limit, src *Route) (Operator, *ApplyResult) { if src.IsSingleShard() { - return rewrite.Swap(in, src, "push limit under route") + return Swap(in, src, "push limit under route") } return setUpperLimit(in) } -func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { +func setUpperLimit(in *Limit) (Operator, *ApplyResult) { if in.Pushed { - return in, rewrite.SameTree, nil + return in, NoRewrite } in.Pushed = true - visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - return op, rewrite.SameTree, nil + visitor := func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + return op, NoRewrite } - var result *rewrite.ApplyResult - shouldVisit := func(op ops.Operator) rewrite.VisitRule { + var result *ApplyResult + shouldVisit := func(op Operator) VisitRule { switch op := op.(type) { case *Join, *ApplyJoin, *SubQueryContainer, *SubQuery: // we can't push limits down on either side - return rewrite.SkipChildren + return SkipChildren case *Route: newSrc := &Limit{ Source: op.Source, @@ -523,48 +484,46 @@ func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { Pushed: false, } op.Source = newSrc - result = result.Merge(rewrite.NewTree("push limit under route")) - return rewrite.SkipChildren + result = result.Merge(Rewrote("push limit under route")) + return SkipChildren default: - return rewrite.VisitChildren + return VisitChildren } } - _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) - if err != nil { - return nil, nil, err - } - return in, result, nil + TopDown(in.Source, TableID, visitor, shouldVisit) + + return in, result } -func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) 
(Operator, *ApplyResult) { switch src := in.Source.(type) { case *Route: - return rewrite.Swap(in, src, "push ordering under route") + return Swap(in, src, "push ordering under route") case *Filter: - return rewrite.Swap(in, src, "push ordering under filter") + return Swap(in, src, "push ordering under filter") case *ApplyJoin: if canPushLeft(ctx, src, in.Order) { // ApplyJoin is stable in regard to the columns coming from the LHS, // so if all the ordering columns come from the LHS, we can push down the Ordering there src.LHS, in.Source = in, src.LHS - return src, rewrite.NewTree("push down ordering on the LHS of a join"), nil + return src, Rewrote("push down ordering on the LHS of a join") } case *Ordering: // we'll just remove the order underneath. The top order replaces whatever was incoming in.Source = src.Source - return in, rewrite.NewTree("remove double ordering"), nil + return in, Rewrote("remove double ordering") case *Projection: // we can move ordering under a projection if it's not introducing a column we're sorting by for _, by := range in.Order { if !fetchByOffset(by.SimplifiedExpr) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } - return rewrite.Swap(in, src, "push ordering under projection") + return Swap(in, src, "push ordering under projection") case *Aggregator: if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) { - return in, rewrite.SameTree, nil + return in, NoRewrite } return pushOrderingUnderAggr(ctx, in, src) @@ -573,26 +532,26 @@ func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operat for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery"), nil + return src, Rewrote("push ordering into outer side of subquery") case *SubQuery: 
outerTableID := TableID(src.Outer) for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery"), nil + return src, Rewrote("push ordering into outer side of subquery") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { +func overlaps(ctx *plancontext.PlanningContext, order []OrderBy, grouping []GroupBy) bool { ordering: for _, orderBy := range order { for _, groupBy := range grouping { @@ -606,13 +565,13 @@ ordering: return true } -func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (Operator, *ApplyResult) { // If Aggregator is a derived table, then we should rewrite the ordering before pushing. 
if aggregator.isDerived() { for idx, orderExpr := range order.Order { ti, err := ctx.SemTable.TableInfoFor(aggregator.DT.TableID) if err != nil { - return nil, nil, err + panic(err) } newOrderExpr := orderExpr.Map(func(expr sqlparser.Expr) sqlparser.Expr { return semantics.RewriteDerivedTableExpression(expr, ti) @@ -666,12 +625,12 @@ func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, ag order.Source = aggrSource.Source aggrSource.Source = nil // removing from plan tree aggregator.Source = order - return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering"), nil + return aggregator, Rewrote("push ordering under aggregation, removing extra ordering") } - return rewrite.Swap(order, aggregator, "push ordering under aggregation") + return Swap(order, aggregator, "push ordering under aggregation") } -func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { +func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []OrderBy) bool { lhs := TableID(aj.LHS) for _, order := range order { deps := ctx.SemTable.DirectDeps(order.Inner.Expr) @@ -682,7 +641,7 @@ func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.Or return true } -func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { +func isOuterTable(op Operator, ts semantics.TableSet) bool { aj, ok := op.(*ApplyJoin) if ok && aj.LeftJoin && TableID(aj.RHS).IsOverlapping(ts) { return true @@ -697,39 +656,35 @@ func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { return false } -func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Projection: return pushFilterUnderProjection(ctx, in, src) case *Route: for _, pred := range in.Predicates { - var err error deps := 
ctx.SemTable.RecursiveDeps(pred) if !isOuterTable(src, deps) { // we can only update based on predicates on inner tables - src.Routing, err = src.Routing.updateRoutingLogic(ctx, pred) - if err != nil { - return nil, nil, err - } + src.Routing = src.Routing.updateRoutingLogic(ctx, pred) } } - return rewrite.Swap(in, src, "push filter into Route") + return Swap(in, src, "push filter into Route") case *SubQuery: outerTableID := TableID(src.Outer) for _, pred := range in.Predicates { deps := ctx.SemTable.RecursiveDeps(pred) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push filter to outer query in subquery container"), nil + return src, Rewrote("push filter to outer query in subquery container") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (ops.Operator, *rewrite.ApplyResult, error) { +func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (Operator, *ApplyResult) { for _, p := range filter.Predicates { cantPush := false _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { @@ -746,64 +701,64 @@ func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, }, p) if cantPush { - return filter, rewrite.SameTree, nil + return filter, NoRewrite } } - return rewrite.Swap(filter, projection, "push filter under projection") + return Swap(filter, projection, "push filter under projection") } -func tryPushDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushDistinct(in *Distinct) (Operator, *ApplyResult) { if in.Required && in.PushedPerformance { - return in, rewrite.SameTree, nil + return in, NoRewrite } switch src := in.Source.(type) { case *Route: if isDistinct(src.Source) && src.IsSingleShard() { - return src, 
rewrite.NewTree("distinct not needed"), nil + return src, Rewrote("distinct not needed") } if src.IsSingleShard() || !in.Required { - return rewrite.Swap(in, src, "push distinct under route") + return Swap(in, src, "push distinct under route") } if isDistinct(src.Source) { - return in, rewrite.SameTree, nil + return in, NoRewrite } src.Source = &Distinct{Source: src.Source} in.PushedPerformance = true - return in, rewrite.NewTree("added distinct under route - kept original"), nil + return in, Rewrote("added distinct under route - kept original") case *Distinct: src.Required = false src.PushedPerformance = false - return src, rewrite.NewTree("remove double distinct"), nil + return src, Rewrote("remove double distinct") case *Union: for i := range src.Sources { src.Sources[i] = &Distinct{Source: src.Sources[i]} } in.PushedPerformance = true - return in, rewrite.NewTree("push down distinct under union"), nil + return in, Rewrote("push down distinct under union") case *ApplyJoin: src.LHS = &Distinct{Source: src.LHS} src.RHS = &Distinct{Source: src.RHS} in.PushedPerformance = true if in.Required { - return in, rewrite.NewTree("push distinct under join - kept original"), nil + return in, Rewrote("push distinct under join - kept original") } - return in.Source, rewrite.NewTree("push distinct under join"), nil + return in.Source, Rewrote("push distinct under join") case *Ordering: in.Source = src.Source - return in, rewrite.NewTree("remove ordering under distinct"), nil + return in, Rewrote("remove ordering under distinct") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func isDistinct(op ops.Operator) bool { +func isDistinct(op Operator) bool { switch op := op.(type) { case *Distinct: return true @@ -818,44 +773,40 @@ func isDistinct(op ops.Operator) bool { } } -func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (ops.Operator, *rewrite.ApplyResult, error) { - if res := compactUnion(op); res != rewrite.SameTree { - return op, res, nil +func 
tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (Operator, *ApplyResult) { + if res := compactUnion(op); res != NoRewrite { + return op, res } - var sources []ops.Operator + var sources []Operator var selects []sqlparser.SelectExprs - var err error if op.distinct { - sources, selects, err = mergeUnionInputInAnyOrder(ctx, op) + sources, selects = mergeUnionInputInAnyOrder(ctx, op) } else { - sources, selects, err = mergeUnionInputsInOrder(ctx, op) - } - if err != nil { - return nil, nil, err + sources, selects = mergeUnionInputsInOrder(ctx, op) } if len(sources) == 1 { result := sources[0].(*Route) if result.IsSingleShard() || !op.distinct { - return result, rewrite.NewTree("push union under route"), nil + return result, Rewrote("push union under route") } return &Distinct{ Source: result, Required: true, - }, rewrite.NewTree("push union under route"), nil + }, Rewrote("push union under route") } if len(sources) == len(op.Sources) { - return op, rewrite.SameTree, nil + return op, NoRewrite } - return newUnion(sources, selects, op.unionColumns, op.distinct), rewrite.NewTree("merge union inputs"), nil + return newUnion(sources, selects, op.unionColumns, op.distinct), Rewrote("merge union inputs") } // addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for -func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { +func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon Operator, output Operator) (Operator, error) { horizon, ok := oldHorizon.(*Horizon) if !ok { return output, nil @@ -871,20 +822,14 @@ func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, o return output, nil } - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, err - } - proj, err := createSimpleProjection(ctx, qp, output) - if err != nil { - return 
nil, err - } + qp := horizon.getQP(ctx) + proj := createSimpleProjection(ctx, qp, output) return proj, nil } -func stopAtRoute(operator ops.Operator) rewrite.VisitRule { +func stopAtRoute(operator Operator) VisitRule { _, isRoute := operator.(*Route) - return rewrite.VisitRule(!isRoute) + return VisitRule(!isRoute) } func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { diff --git a/go/vt/vtgate/planbuilder/operators/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go index b0e6b4440be..bc731f29df6 100644 --- a/go/vt/vtgate/planbuilder/operators/querygraph.go +++ b/go/vt/vtgate/planbuilder/operators/querygraph.go @@ -20,7 +20,6 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -63,7 +62,7 @@ type ( } ) -var _ ops.Operator = (*QueryGraph)(nil) +var _ Operator = (*QueryGraph)(nil) // Introduces implements the tableIDIntroducer interface func (qg *QueryGraph) introducesTableID() semantics.TableSet { @@ -163,7 +162,7 @@ func (qg *QueryGraph) UnsolvedPredicates(_ *semantics.SemTable) []sqlparser.Expr } // Clone implements the Operator interface -func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { +func (qg *QueryGraph) Clone([]Operator) Operator { result := &QueryGraph{ Tables: nil, innerJoins: nil, @@ -176,11 +175,11 @@ func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { return result } -func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { for _, e := range sqlparser.SplitAndExpression(nil, expr) { qg.collectPredicate(ctx, e) } diff --git 
a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go index 1cef6706a9f..f9f6f7fa15d 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -47,7 +46,7 @@ type ( HasAggr bool Distinct bool groupByExprs []GroupBy - OrderExprs []ops.OrderBy + OrderExprs []OrderBy HasStar bool // AddedColumn keeps a counter for expressions added to solve HAVING expressions the user is not selecting @@ -72,9 +71,6 @@ type ( // The index at which the user expects to see this column. Set to nil, if the user does not ask for it InnerIndex *int - // The original aliased expression that this group by is referring - aliasedExpr *sqlparser.AliasedExpr - // points to the column on the same aggregator ColOffset int WSOffset int @@ -127,11 +123,10 @@ func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) evalengine.T } // NewGroupBy creates a new group by from the given fields. 
-func NewGroupBy(inner, simplified sqlparser.Expr, aliasedExpr *sqlparser.AliasedExpr) GroupBy { +func NewGroupBy(inner, simplified sqlparser.Expr) GroupBy { return GroupBy{ Inner: inner, SimplifiedExpr: simplified, - aliasedExpr: aliasedExpr, ColOffset: -1, WSOffset: -1, } @@ -148,8 +143,8 @@ func NewAggr(opCode opcode.AggregateOpcode, f sqlparser.AggrFunc, original *sqlp } } -func (b GroupBy) AsOrderBy() ops.OrderBy { - return ops.OrderBy{ +func (b GroupBy) AsOrderBy() OrderBy { + return OrderBy{ Inner: &sqlparser.Order{ Expr: b.Inner, Direction: sqlparser.AscOrder, @@ -158,26 +153,6 @@ func (b GroupBy) AsOrderBy() ops.OrderBy { } } -func (b GroupBy) AsAliasedExpr() *sqlparser.AliasedExpr { - if b.aliasedExpr != nil { - return b.aliasedExpr - } - col, isColName := b.Inner.(*sqlparser.ColName) - if isColName && b.SimplifiedExpr != b.Inner { - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, - As: col.Name, - } - } - if !isColName && b.SimplifiedExpr != b.Inner { - panic("this should not happen - different inner and weighStringExpr and not a column alias") - } - - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, - } -} - // GetExpr returns the underlying sqlparser.Expr of our SelectExpr func (s SelectExpr) GetExpr() (sqlparser.Expr, error) { switch sel := s.Col.(type) { @@ -316,7 +291,7 @@ func containsAggr(e sqlparser.SQLNode) (hasAggr bool) { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node.(type) { case *sqlparser.Offset: - // offsets here indicate that a possible aggregation has already been handled by an input + // offsets here indicate that a possible aggregation has already been handled by an input, // so we don't need to worry about aggregation in the original return false, nil case sqlparser.AggrFunc: @@ -381,7 +356,7 @@ func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy if !es.add(ctx, simpleExpr) { continue } - qp.OrderExprs = append(qp.OrderExprs, ops.OrderBy{ + 
qp.OrderExprs = append(qp.OrderExprs, OrderBy{ Inner: sqlparser.CloneRefOfOrder(order), SimplifiedExpr: simpleExpr, }) @@ -436,7 +411,7 @@ func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) e func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy sqlparser.GroupBy) error { es := &expressionSet{} for _, group := range groupBy { - selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group) + selectExprIdx := qp.FindSelectExprIndexForExpr(ctx, group) simpleExpr, err := qp.GetSimplifiedExpr(ctx, group) if err != nil { return err @@ -450,7 +425,7 @@ func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy continue } - groupBy := NewGroupBy(group, simpleExpr, aliasExpr) + groupBy := NewGroupBy(group, simpleExpr) groupBy.InnerIndex = selectExprIdx qp.groupByExprs = append(qp.groupByExprs, groupBy) @@ -809,7 +784,7 @@ func createAggrFromAggrFunc(fnc sqlparser.AggrFunc, aliasedExpr *sqlparser.Alias // FindSelectExprIndexForExpr returns the index of the given expression in the select expressions, if it is part of it // returns -1 otherwise. 
-func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (*int, *sqlparser.AliasedExpr) { +func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) *int { colExpr, isCol := expr.(*sqlparser.ColName) for idx, selectExpr := range qp.SelectExprs { @@ -820,14 +795,14 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningC if isCol { isAliasExpr := aliasedExpr.As.NotEmpty() if isAliasExpr && colExpr.Name.Equal(aliasedExpr.As) { - return &idx, aliasedExpr + return &idx } } if ctx.SemTable.EqualsExprWithDeps(aliasedExpr.Expr, expr) { - return &idx, aliasedExpr + return &idx } } - return nil, nil + return nil } // OldAlignGroupByAndOrderBy TODO Remove once all of horizon planning is done on the operators @@ -920,7 +895,7 @@ func (qp *QueryProjection) GetColumnCount() int { func (qp *QueryProjection) orderByOverlapWithSelectExpr(ctx *plancontext.PlanningContext) bool { for _, expr := range qp.OrderExprs { - idx, _ := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) + idx := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) if idx != nil { return true } @@ -950,7 +925,7 @@ func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningCont if found != -1 { continue } - groupBy := NewGroupBy(ae.Expr, sExpr, ae) + groupBy := NewGroupBy(ae.Expr, sExpr) selectExprIdx := idx groupBy.InnerIndex = &selectExprIdx diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 7c92b716d7c..1319ad7f9f6 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" 
"vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -33,7 +32,7 @@ func TestQP(t *testing.T) { sql string expErr string - expOrder []ops.OrderBy + expOrder []OrderBy }{ { sql: "select * from user", @@ -46,20 +45,20 @@ func TestQP(t *testing.T) { }, { sql: "select 1, count(1) from user order by 1", - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ {Inner: &sqlparser.Order{Expr: sqlparser.NewIntLiteral("1")}, SimplifiedExpr: sqlparser.NewIntLiteral("1")}, }, }, { sql: "select id from user order by col, id, 1", - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("col")}, SimplifiedExpr: sqlparser.NewColName("col")}, {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, SimplifiedExpr: sqlparser.NewColName("id")}, }, }, { sql: "SELECT CONCAT(last_name,', ',first_name) AS full_name FROM mytable ORDER BY full_name", // alias in order not supported - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ { Inner: &sqlparser.Order{Expr: sqlparser.NewColName("full_name")}, SimplifiedExpr: &sqlparser.FuncExpr{ diff --git a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewriters.go similarity index 67% rename from go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go rename to go/vt/vtgate/planbuilder/operators/rewriters.go index 1ecc0cd8e76..6a329860b4b 100644 --- a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go +++ b/go/vt/vtgate/planbuilder/operators/rewriters.go @@ -14,27 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rewrite +package operators import ( "fmt" "slices" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( // VisitF is the visitor that walks an operator tree VisitF func( - op ops.Operator, // op is the operator being visited + op Operator, // op is the operator being visited lhsTables semantics.TableSet, // lhsTables contains the TableSet for all table on the LHS of our parent isRoot bool, // isRoot will be true for the root of the operator tree - ) (ops.Operator, *ApplyResult, error) + ) (Operator, *ApplyResult) // ShouldVisit is used when we want to control which nodes and ancestors to visit and which to skip - ShouldVisit func(ops.Operator) VisitRule + ShouldVisit func(Operator) VisitRule // ApplyResult tracks modifications to node and expression trees. // Only return SameTree when it is acceptable to return the original @@ -52,7 +51,7 @@ type ( ) var ( - SameTree *ApplyResult = nil + NoRewrite *ApplyResult = nil ) const ( @@ -60,7 +59,7 @@ const ( SkipChildren VisitRule = false ) -func NewTree(message string) *ApplyResult { +func Rewrote(message string) *ApplyResult { if DebugOperatorTree { fmt.Println(">>>>>>>> " + message) } @@ -82,13 +81,13 @@ func (ar *ApplyResult) Changed() bool { } // Visit allows for the walking of the operator tree. 
If any error is returned, the walk is aborted -func Visit(root ops.Operator, visitor func(ops.Operator) error) error { - _, _, err := breakableTopDown(root, func(op ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error) { +func Visit(root Operator, visitor func(Operator) error) error { + _, _, err := breakableTopDown(root, func(op Operator) (Operator, *ApplyResult, VisitRule, error) { err := visitor(op) if err != nil { - return nil, SameTree, SkipChildren, err + return nil, NoRewrite, SkipChildren, err } - return op, SameTree, VisitChildren, nil + return op, NoRewrite, VisitChildren, nil }) return err } @@ -97,16 +96,13 @@ func Visit(root ops.Operator, visitor func(ops.Operator) error) error { // the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated // into a final output indicating whether the operator tree was changed. func BottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (ops.Operator, error) { - op, _, err := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } - return op, nil +) Operator { + op, _ := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) + return op } var DebugOperatorTree = false @@ -122,26 +118,23 @@ func EnableDebugPrinting() (reset func()) { // FixedPointBottomUp rewrites an operator tree much like BottomUp does, // but does the rewriting repeatedly, until a tree walk is done with no changes to the tree. 
func FixedPointBottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { +) (op Operator) { var id *ApplyResult op = root // will loop while the rewriting changes anything - for ok := true; ok; ok = id != SameTree { + for ok := true; ok; ok = id != NoRewrite { if DebugOperatorTree { - fmt.Println(ops.ToTree(op)) + fmt.Println(ToTree(op)) } // Continue the top-down rewriting process as long as changes were made during the last traversal - op, id, err = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } + op, id = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) } - return op, nil + return op } // TopDown rewrites an operator tree from the bottom up. BottomUp applies a transformation function to @@ -155,31 +148,28 @@ func FixedPointBottomUp( // - shouldVisit: The ShouldVisit function to control which nodes and ancestors to visit and which to skip. // // Returns: -// - ops.Operator: The root of the (potentially) transformed operator tree. +// - Operator: The root of the (potentially) transformed operator tree. // - error: An error if any occurred during the traversal. 
func TopDown( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { - op, _, err = topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } +) Operator { + op, _ := topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - return op, nil + return op } // Swap takes a tree like a->b->c and swaps `a` and `b`, so we end up with b->a->c -func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResult, error) { +func Swap(parent, child Operator, message string) (Operator, *ApplyResult) { c := child.Inputs() if len(c) != 1 { - return nil, nil, vterrors.VT13001("Swap can only be used on single input operators") + panic(vterrors.VT13001("Swap can only be used on single input operators")) } aInputs := slices.Clone(parent.Inputs()) - var tmp ops.Operator + var tmp Operator for i, in := range aInputs { if in == child { tmp = aInputs[i] @@ -188,30 +178,30 @@ func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResul } } if tmp == nil { - return nil, nil, vterrors.VT13001("Swap can only be used when the second argument is an input to the first") + panic(vterrors.VT13001("Swap can only be used when the second argument is an input to the first")) } - child.SetInputs([]ops.Operator{parent}) + child.SetInputs([]Operator{parent}) parent.SetInputs(aInputs) - return child, NewTree(message), nil + return child, Rewrote(message) } func bottomUp( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { +) (Operator, *ApplyResult) { if shouldVisit != nil && !shouldVisit(root) { - return root, SameTree, nil + return 
root, NoRewrite } oldInputs := root.Inputs() var anythingChanged *ApplyResult - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID // noLHSTableSet is used to mark which operators that do not send data from the LHS to the RHS @@ -227,10 +217,7 @@ func bottomUp( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err - } + in, changed := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } @@ -239,18 +226,15 @@ func bottomUp( root = root.Clone(newInputs) } - newOp, treeIdentity, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } + newOp, treeIdentity := rewriter(root, rootID, isRoot) anythingChanged = anythingChanged.Merge(treeIdentity) - return newOp, anythingChanged, nil + return newOp, anythingChanged } func breakableTopDown( - in ops.Operator, - rewriter func(ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error), -) (ops.Operator, *ApplyResult, error) { + in Operator, + rewriter func(Operator) (Operator, *ApplyResult, VisitRule, error), +) (Operator, *ApplyResult, error) { newOp, identity, visit, err := rewriter(in) if err != nil || visit == SkipChildren { return newOp, identity, err @@ -259,17 +243,17 @@ func breakableTopDown( var anythingChanged *ApplyResult oldInputs := newOp.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) for i, oldInput := range oldInputs { newInputs[i], identity, err = breakableTopDown(oldInput, rewriter) anythingChanged = anythingChanged.Merge(identity) if err != nil { - return nil, SameTree, err + return nil, NoRewrite, err } } if anythingChanged.Changed() { - return newOp, SameTree, nil + return newOp, 
NoRewrite, nil } return newOp.Clone(newInputs), anythingChanged, nil @@ -279,20 +263,17 @@ func breakableTopDown( // top down and applies the given transformation function. It also returns the ApplyResult // indicating whether the tree was changed func topDown( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { - newOp, anythingChanged, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } +) (Operator, *ApplyResult) { + newOp, anythingChanged := rewriter(root, rootID, isRoot) if !shouldVisit(root) { - return newOp, anythingChanged, nil + return newOp, anythingChanged } if anythingChanged.Changed() { @@ -300,7 +281,7 @@ func topDown( } oldInputs := root.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID type noLHSTableSet interface{ NoLHSTableSet() } @@ -309,17 +290,14 @@ func topDown( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err - } + in, changed := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } - if anythingChanged != SameTree { - return root.Clone(newInputs), anythingChanged, nil + if anythingChanged != NoRewrite { + return root.Clone(newInputs), anythingChanged } - return root, SameTree, nil + return root, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index acbc28553dd..d5eee19e5dd 100644 --- a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -25,7 +25,6 @@ 
import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +32,7 @@ import ( type ( Route struct { - Source ops.Operator + Source Operator // Routes that have been merged into this one. MergedWith []*Route @@ -89,7 +88,7 @@ type ( Routing interface { // UpdateRoutingParams allows a Routing to control the routing params that will be used by the engine Route // OpCode is already set, and the default keyspace is set for read queries - UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) error + UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) // Clone returns a copy of the routing. Since we are trying different variation of merging, // one Routing can be used in different constellations. @@ -103,27 +102,27 @@ type ( // updateRoutingLogic updates the routing to take predicates into account. This can be used for routing // using vindexes or for figuring out which keyspace an information_schema query should be sent to. 
- updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) + updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing } ) // UpdateRoutingLogic first checks if we are dealing with a predicate that -func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) (Routing, error) { +func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) Routing { ks := r.Keyspace() if ks == nil { var err error ks, err = ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } } nr := &NoneRouting{keyspace: ks} if isConstantFalse(expr) { - return nr, nil + return nr } - exit := func() (Routing, error) { + exit := func() Routing { return r.updateRoutingLogic(ctx, expr) } @@ -135,7 +134,7 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r if cmp.Operator != sqlparser.NullSafeEqualOp && (sqlparser.IsNull(cmp.Left) || sqlparser.IsNull(cmp.Right)) { // any comparison against a literal null, except a null safe equality (<=>), will return null - return nr, nil + return nr } tuples, ok := cmp.Right.(sqlparser.ValTuple) @@ -148,13 +147,13 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r for _, n := range tuples { // If any of the values in the tuple is a literal null, we know that this comparison will always return NULL if sqlparser.IsNull(n) { - return nr, nil + return nr } } case sqlparser.InOp: // WHERE col IN (null) if len(tuples) == 1 && sqlparser.IsNull(tuples[0]) { - return nr, nil + return nr } } @@ -189,7 +188,7 @@ func (r *Route) Cost() int { } // Clone implements the Operator interface -func (r *Route) Clone(inputs []ops.Operator) ops.Operator { +func (r *Route) Clone(inputs []Operator) Operator { cloneRoute := *r cloneRoute.Source = inputs[0] cloneRoute.Routing = r.Routing.Clone() @@ -197,12 +196,12 @@ func (r *Route) Clone(inputs []ops.Operator) ops.Operator { } // Inputs 
implements the Operator interface -func (r *Route) Inputs() []ops.Operator { - return []ops.Operator{r.Source} +func (r *Route) Inputs() []Operator { + return []Operator{r.Source} } // SetInputs implements the Operator interface -func (r *Route) SetInputs(ops []ops.Operator) { +func (r *Route) SetInputs(ops []Operator) { r.Source = ops[0] } @@ -357,7 +356,7 @@ func createRoute( ctx *plancontext.PlanningContext, queryTable *QueryTable, solves semantics.TableSet, -) (ops.Operator, error) { +) Operator { if queryTable.IsInfSchema { return createInfSchemaRoute(ctx, queryTable) } @@ -372,13 +371,13 @@ func findVSchemaTableAndCreateRoute( tableName sqlparser.TableName, solves semantics.TableSet, planAlternates bool, -) (*Route, error) { +) *Route { vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(tableName) if target != nil { - return nil, vterrors.VT09017("SELECT with a target destination is not allowed") + panic(vterrors.VT09017("SELECT with a target destination is not allowed")) } if err != nil { - return nil, err + panic(err) } return createRouteFromVSchemaTable( @@ -397,7 +396,7 @@ func createRouteFromVSchemaTable( vschemaTable *vindexes.Table, solves semantics.TableSet, planAlternates bool, -) (*Route, error) { +) *Route { if vschemaTable.Name.String() != queryTable.Table.Name.String() { // we are dealing with a routed table queryTable = queryTable.Clone() @@ -405,7 +404,7 @@ func createRouteFromVSchemaTable( queryTable.Table.Name = vschemaTable.Name astTable, ok := queryTable.Alias.Expr.(sqlparser.TableName) if !ok { - return nil, vterrors.VT13001("a derived table should never be a routed table") + panic(vterrors.VT13001("a derived table should never be a routed table")) } realTableName := sqlparser.NewIdentifierCS(vschemaTable.Name.String()) astTable.Name = realTableName @@ -424,11 +423,7 @@ func createRouteFromVSchemaTable( // We create the appropiate Routing struct here, depending on the type of table we are dealing with. 
routing := createRoutingForVTable(vschemaTable, solves) for _, predicate := range queryTable.Predicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } plan.Routing = routing @@ -436,24 +431,16 @@ func createRouteFromVSchemaTable( switch routing := routing.(type) { case *ShardedRouting: if routing.isScatter() && len(queryTable.Predicates) > 0 { - var err error // If we have a scatter query, it's worth spending a little extra time seeing if we can't improve it - plan.Routing, err = routing.tryImprove(ctx, queryTable) - if err != nil { - return nil, err - } + plan.Routing = routing.tryImprove(ctx, queryTable) } case *AnyShardRouting: if planAlternates { - alternates, err := createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable, solves) - if err != nil { - return nil, err - } - routing.Alternates = alternates + routing.Alternates = createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable, solves) } } - return plan, nil + return plan } func createRoutingForVTable(vschemaTable *vindexes.Table, id semantics.TableSet) Routing { @@ -474,13 +461,13 @@ func createAlternateRoutesFromVSchemaTable( queryTable *QueryTable, vschemaTable *vindexes.Table, solves semantics.TableSet, -) (map[*vindexes.Keyspace]*Route, error) { +) map[*vindexes.Keyspace]*Route { routes := make(map[*vindexes.Keyspace]*Route) switch vschemaTable.Type { case "", vindexes.TypeReference: for ksName, referenceTable := range vschemaTable.ReferencedBy { - route, err := findVSchemaTableAndCreateRoute( + route := findVSchemaTableAndCreateRoute( ctx, queryTable, sqlparser.TableName{ @@ -490,23 +477,17 @@ func createAlternateRoutesFromVSchemaTable( solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } routes[referenceTable.Keyspace] = route } if vschemaTable.Source != nil { - route, err := findVSchemaTableAndCreateRoute( + route := 
findVSchemaTableAndCreateRoute( ctx, queryTable, vschemaTable.Source.TableName, solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } keyspace := route.Routing.Keyspace() if keyspace != nil { routes[keyspace] = route @@ -514,15 +495,12 @@ func createAlternateRoutesFromVSchemaTable( } } - return routes, nil + return routes } -func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // first we see if the predicate changes how we route - newRouting, err := UpdateRoutingLogic(ctx, expr, r.Routing) - if err != nil { - panic(err) - } + newRouting := UpdateRoutingLogic(ctx, expr, r.Routing) r.Routing = newRouting // we also need to push the predicate down into the query @@ -530,16 +508,13 @@ func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return r } -func createProjection(ctx *plancontext.PlanningContext, src ops.Operator) (*Projection, error) { +func createProjection(ctx *plancontext.PlanningContext, src Operator) *Projection { proj := newAliasedProjection(src) cols := src.GetColumns(ctx) for _, col := range cols { - _, err := proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, err - } + proj.addUnexploredExpr(col, col.Expr) } - return proj, nil + return proj } func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) int { @@ -561,10 +536,7 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, } // If no-one could be found, we probably don't have one yet, so we add one here - src, err := createProjection(ctx, r.Source) - if err != nil { - panic(err) - } + src := createProjection(ctx, r.Source) r.Source = src offsets = src.addColumnsWithoutPushing(ctx, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) @@ -572,7 +544,7 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, 
reuse bool, gb bool, } type selectExpressions interface { - ops.Operator + Operator addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) int addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) []int isDerived() bool @@ -581,7 +553,7 @@ type selectExpressions interface { // addColumnToInput adds a column to an operator without pushing it down. // It will return a bool indicating whether the addition was successful or not, // and an offset to where the column can be found -func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator ops.Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (ops.Operator, bool, []int) { +func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (Operator, bool, []int) { switch op := operator.(type) { case *SubQuery: src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) @@ -657,7 +629,7 @@ func (r *Route) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return r.Source.GetSelectExprs(ctx) } -func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return r.Source.GetOrdering(ctx) } @@ -673,7 +645,7 @@ func (r *Route) TablesUsed() []string { return collect() } -func isSpecialOrderBy(o ops.OrderBy) bool { +func isSpecialOrderBy(o OrderBy) bool { if sqlparser.IsNull(o.Inner.Expr) { return true } @@ -681,7 +653,7 @@ func isSpecialOrderBy(o ops.OrderBy) bool { return isFunction && f.Name.Lowered() == "rand" } -func (r *Route) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (r *Route) planOffsets(ctx *plancontext.PlanningContext) Operator { // if operator is returning data from a single shard, we don't need to do anything more if 
r.IsSingleShard() { return nil diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 5bce334a609..30c187ac955 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -26,8 +26,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -38,34 +36,34 @@ type ( left, right semantics.TableSet } - opCacheMap map[tableSetPair]ops.Operator + opCacheMap map[tableSetPair]Operator ) -func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (Operator, *ApplyResult) { innerRoute, ok := op.Source.(*Route) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } if !(innerRoute.Routing.OpCode() == engine.EqualUnique) && !op.IsMergeable(ctx) { // no need to check anything if we are sure that we will only hit a single shard - return op, rewrite.SameTree, nil + return op, NoRewrite } - return rewrite.Swap(op, op.Source, "push derived under route") + return Swap(op, op.Source, "push derived under route") } -func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, *rewrite.ApplyResult, error) { +func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (Operator, *ApplyResult) { return mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin) } -func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed *rewrite.ApplyResult, err error) { +func optimizeQueryGraph(ctx 
*plancontext.PlanningContext, op *QueryGraph) (result Operator, changed *ApplyResult) { switch { case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right: - result, err = leftToRightSolve(ctx, op) + result = leftToRightSolve(ctx, op) default: - result, err = greedySolve(ctx, op) + result = greedySolve(ctx, op) } unresolved := op.UnsolvedPredicates(ctx.SemTable) @@ -75,7 +73,7 @@ func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (resul result = newFilter(result, ctx.SemTable.AndExpressions(unresolved...)) } - changed = rewrite.NewTree("solved query graph") + changed = Rewrote("solved query graph") return } @@ -84,18 +82,18 @@ func buildVindexTableForDML( tableInfo semantics.TableInfo, table *QueryTable, dmlType string, -) (*vindexes.Table, Routing, error) { +) (*vindexes.Table, Routing) { vindexTable := tableInfo.GetVindexTable() if vindexTable.Source != nil { sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vindexTable.Source.TableName) if err != nil { - return nil, nil, err + panic(err) } vindexTable = sourceTable } if !vindexTable.Keyspace.Sharded { - return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace}, nil + return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace} } var dest key.Destination @@ -103,23 +101,23 @@ func buildVindexTableForDML( var err error tblName, ok := table.Alias.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + panic(vterrors.VT12001("multi shard UPDATE with LIMIT")) } _, _, _, typ, dest, err = ctx.VSchema.FindTableOrVindex(tblName) if err != nil { - return nil, nil, err + panic(err) } if dest == nil { routing := &ShardedRouting{ keyspace: vindexTable.Keyspace, RouteOpCode: engine.Scatter, } - return vindexTable, routing, nil + return vindexTable, routing } if typ != topodatapb.TabletType_PRIMARY { - return nil, nil, vterrors.VT09002(dmlType) + panic(vterrors.VT09002(dmlType)) } // we are dealing with an explicitly 
targeted DML @@ -127,7 +125,7 @@ func buildVindexTableForDML( keyspace: vindexTable.Keyspace, TargetDestination: dest, } - return vindexTable, routing, nil + return vindexTable, routing } func generateOwnedVindexQuery(tblExpr sqlparser.TableExpr, del *sqlparser.Delete, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string { @@ -154,21 +152,14 @@ func getUpdateVindexInformation( vindexTable *vindexes.Table, tableID semantics.TableSet, assignments []SetExpr, -) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string, error) { +) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string) { if !vindexTable.Keyspace.Sharded { - return nil, nil, "", nil, nil + return nil, nil, "", nil } - primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, vindexTable) - if err != nil { - return nil, nil, "", nil, err - } - - changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, err := buildChangedVindexesValues(ctx, updStmt, vindexTable, primaryVindex.Columns, assignments) - if err != nil { - return nil, nil, "", nil, err - } - return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, nil + primaryVindex, vindexAndPredicates := getVindexInformation(tableID, vindexTable) + changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex := buildChangedVindexesValues(ctx, updStmt, vindexTable, primaryVindex.Columns, assignments) + return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex } /* @@ -177,67 +168,51 @@ func getUpdateVindexInformation( and removes the two inputs to this cheapest plan and instead adds the join. 
As an optimization, it first only considers joining tables that have predicates defined between them */ -func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - routeOps, err := seedOperatorList(ctx, qg) +func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + routeOps := seedOperatorList(ctx, qg) planCache := opCacheMap{} - if err != nil { - return nil, err - } - op, err := mergeRoutes(ctx, qg, routeOps, planCache, false) - if err != nil { - return nil, err - } - return op, nil + return mergeRoutes(ctx, qg, routeOps, planCache, false) } -func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - plans, err := seedOperatorList(ctx, qg) - if err != nil { - return nil, err - } +func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + plans := seedOperatorList(ctx, qg) - var acc ops.Operator + var acc Operator for _, plan := range plans { if acc == nil { acc = plan continue } joinPredicates := qg.GetPredicates(TableID(acc), TableID(plan)) - acc, _, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true) - if err != nil { - return nil, err - } + acc, _ = mergeOrJoin(ctx, acc, plan, joinPredicates, true) } - return acc, nil + return acc } // seedOperatorList returns a route for each table in the qg -func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) ([]ops.Operator, error) { - plans := make([]ops.Operator, len(qg.Tables)) +func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) []Operator { + plans := make([]Operator, len(qg.Tables)) // we start by seeding the table with the single routes for i, table := range qg.Tables { solves := ctx.SemTable.TableSetFor(table.Alias) - plan, err := createRoute(ctx, table, solves) - if err != nil { - return nil, err - } + plan := createRoute(ctx, table, solves) if qg.NoDeps != nil { plan = plan.AddPredicate(ctx, qg.NoDeps) } plans[i] = plan } - return plans, nil + return 
plans } -func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) (ops.Operator, error) { +func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) Operator { ks, err := ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } - var src ops.Operator = &Table{ + var src Operator = &Table{ QTable: table, VTable: &vindexes.Table{ Name: table.Table.Name, @@ -246,26 +221,20 @@ func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) ( } var routing Routing = &InfoSchemaRouting{} for _, pred := range table.Predicates { - routing, err = UpdateRoutingLogic(ctx, pred, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, pred, routing) } return &Route{ Source: src, Routing: routing, - }, nil + } } -func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []ops.Operator, planCache opCacheMap, crossJoinsOK bool) (ops.Operator, error) { +func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []Operator, planCache opCacheMap, crossJoinsOK bool) Operator { if len(physicalOps) == 0 { - return nil, nil + return nil } for len(physicalOps) > 1 { - bestTree, lIdx, rIdx, err := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) - if err != nil { - return nil, err - } + bestTree, lIdx, rIdx := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) // if we found a plan, we'll replace the two plans that were joined with the join plan created if bestTree != nil { // we remove one plan, and replace the other @@ -279,7 +248,7 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ physicalOps = append(physicalOps, bestTree) } else { if crossJoinsOK { - return nil, vterrors.VT13001("should not happen: we should be able to merge cross joins") + panic(vterrors.VT13001("should not happen: we should be able to merge cross joins")) } // we will only fail to find a join plan when there are only 
cross joins left // when that happens, we switch over to allow cross joins as well. @@ -287,20 +256,20 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ crossJoinsOK = true } } - return physicalOps[0], nil + return physicalOps[0] } -func removeAt(plans []ops.Operator, idx int) []ops.Operator { +func removeAt(plans []Operator, idx int) []Operator { return append(plans[:idx], plans[idx+1:]...) } func findBestJoin( ctx *plancontext.PlanningContext, qg *QueryGraph, - plans []ops.Operator, + plans []Operator, planCache opCacheMap, crossJoinsOK bool, -) (bestPlan ops.Operator, lIdx int, rIdx int, err error) { +) (bestPlan Operator, lIdx int, rIdx int) { for i, lhs := range plans { for j, rhs := range plans { if i == j { @@ -313,10 +282,7 @@ func findBestJoin( // cartesian product, which is almost always a bad idea continue } - plan, err := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) - if err != nil { - return nil, 0, 0, err - } + plan := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) if bestPlan == nil || CostOf(plan) < CostOf(bestPlan) { bestPlan = plan // remember which plans we based on, so we can remove them later @@ -325,30 +291,25 @@ func findBestJoin( } } } - return bestPlan, lIdx, rIdx, nil + return bestPlan, lIdx, rIdx } -func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr) (ops.Operator, error) { +func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs Operator, joinPredicates []sqlparser.Expr) Operator { solves := tableSetPair{left: TableID(lhs), right: TableID(rhs)} cachedPlan := cm[solves] if cachedPlan != nil { - return cachedPlan, nil + return cachedPlan } - join, _, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) - if err != nil { - return nil, err - } + join, _ := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) cm[solves] = join - return join, nil + return join } // requiresSwitchingSides will return true if any 
of the operators with the root from the given operator tree // is of the type that should not be on the RHS of a join -func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) bool { - required := false - - _ = rewrite.Visit(op, func(current ops.Operator) error { +func requiresSwitchingSides(ctx *plancontext.PlanningContext, op Operator) (required bool) { + _ = Visit(op, func(current Operator) error { horizon, isHorizon := current.(*Horizon) if isHorizon && !horizon.IsMergeable(ctx) { @@ -358,14 +319,13 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b return nil }) - - return required + return } -func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, *rewrite.ApplyResult, error) { +func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, inner bool) (Operator, *ApplyResult) { newPlan := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(joinPredicates, inner)) if newPlan != nil { - return newPlan, rewrite.NewTree("merge routes into single operator"), nil + return newPlan, Rewrote("merge routes into single operator") } if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) { @@ -376,26 +336,20 @@ func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPr join.AddJoinPredicate(ctx, pred) } ctx.SemTable.QuerySignature.HashJoin = true - return join, rewrite.NewTree("use a hash join because we have LIMIT on the LHS"), nil + return join, Rewrote("use a hash join because we have LIMIT on the LHS") } join := NewApplyJoin(Clone(rhs), Clone(lhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin, switching side because LIMIT"), nil + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, Rewrote("logical join to 
applyJoin, switching side because LIMIT") } join := NewApplyJoin(Clone(lhs), Clone(rhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin "), nil + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, Rewrote("logical join to applyJoin ") } -func operatorsToRoutes(a, b ops.Operator) (*Route, *Route) { +func operatorsToRoutes(a, b Operator) (*Route, *Route) { aRoute, ok := a.(*Route) if !ok { return nil, nil @@ -433,7 +387,7 @@ func canMergeOnFilter(ctx *plancontext.PlanningContext, a, b *Route, predicate s return rVindex == lVindex } -func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlparser.Expr) vindexes.SingleColumn { +func findColumnVindex(ctx *plancontext.PlanningContext, a Operator, exp sqlparser.Expr) vindexes.SingleColumn { _, isCol := exp.(*sqlparser.ColName) if !isCol { return nil @@ -458,7 +412,7 @@ func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlp deps := ctx.SemTable.RecursiveDeps(expr) - _ = rewrite.Visit(a, func(rel ops.Operator) error { + _ = Visit(a, func(rel Operator) error { to, isTableOp := rel.(tableIDIntroducer) if !isTableOp { return nil @@ -612,14 +566,14 @@ func hexEqual(a, b *sqlparser.Literal) bool { return false } -func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) (ops.Operator, error) { +func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) Operator { if len(exprs) == 0 { - return op, nil + return op } for _, expr := range exprs { AddPredicate(ctx, op, expr, true, newFilter) } - return op, nil + return op } diff --git a/go/vt/vtgate/planbuilder/operators/sequential.go b/go/vt/vtgate/planbuilder/operators/sequential.go index 2b333c6270a..2db376a97bb 100644 --- a/go/vt/vtgate/planbuilder/operators/sequential.go +++ 
b/go/vt/vtgate/planbuilder/operators/sequential.go @@ -17,35 +17,34 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Sequential struct { - Sources []ops.Operator + Sources []Operator noPredicates noColumns } // Clone implements the Operator interface -func (s *Sequential) Clone(inputs []ops.Operator) ops.Operator { +func (s *Sequential) Clone(inputs []Operator) Operator { newOp := *s newOp.Sources = inputs return &newOp } -func (s *Sequential) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (s *Sequential) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (s *Sequential) Inputs() []ops.Operator { +func (s *Sequential) Inputs() []Operator { return s.Sources } // SetInputs implements the Operator interface -func (s *Sequential) SetInputs(ops []ops.Operator) { +func (s *Sequential) SetInputs(ops []Operator) { s.Sources = ops } diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index d54db071d46..239ae9ce419 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -97,28 +97,23 @@ func (tr *ShardedRouting) isScatter() bool { // This can sometimes push a predicate to the top, so it's not hiding inside an OR // 2. 
If that is not enough, an additional rewrite pass is performed where we try to // turn ORs into IN, which is easier for the planner to plan -func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) (Routing, error) { +func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) Routing { oldPredicates := queryTable.Predicates queryTable.Predicates = nil tr.SeenPredicates = nil var routing Routing = tr - var err error for _, pred := range oldPredicates { rewritten := sqlparser.RewritePredicate(pred) predicates := sqlparser.SplitAndExpression(nil, rewritten.(sqlparser.Expr)) for _, predicate := range predicates { queryTable.Predicates = append(queryTable.Predicates, predicate) - - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } // If we have something other than a sharded routing with scatter, we are done if sr, ok := routing.(*ShardedRouting); !ok || !sr.isScatter() { - return routing, nil + return routing } // if we _still_ haven't found a better route, we can run this additional rewrite on any ORs we have @@ -128,23 +123,19 @@ func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTabl continue } for _, predicate := range sqlparser.ExtractINFromOR(or) { - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } - return routing, nil + return routing } -func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace if tr.Selected != nil { rp.Vindex = tr.Selected.FoundVindex rp.Values = tr.Selected.Values } - return nil } func (tr *ShardedRouting) Clone() Routing { @@ -166,17 
+157,13 @@ func (tr *ShardedRouting) Clone() Routing { } } -func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { tr.SeenPredicates = append(tr.SeenPredicates, expr) - newRouting, newVindexFound, err := tr.searchForNewVindexes(ctx, expr) - if err != nil { - return nil, err - } - + newRouting, newVindexFound := tr.searchForNewVindexes(ctx, expr) if newRouting != nil { // we found something that we can route with something other than ShardedRouting - return newRouting, nil + return newRouting } // if we didn't open up any new vindex Options, no need to enter here @@ -184,10 +171,10 @@ func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, e tr.PickBestAvailableVindex() } - return tr, nil + return tr } -func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (Routing, error) { +func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) Routing { tr.RouteOpCode = engine.Scatter tr.Selected = nil for i, vp := range tr.VindexPreds { @@ -196,16 +183,12 @@ func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (R var routing Routing = tr for _, predicate := range tr.SeenPredicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } - return routing, nil + return routing } -func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool, error) { +func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool) { newVindexFound := false switch node := predicate.(type) { case *sqlparser.ComparisonExpr: @@ -216,23 +199,23 @@ func (tr *ShardedRouting) searchForNewVindexes(ctx 
*plancontext.PlanningContext, newVindexFound = newVindexFound || found } - return nil, newVindexFound, nil + return nil, newVindexFound } -func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool, err error) { +func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool) { switch cmp.Operator { case sqlparser.EqualOp: found := tr.planEqualOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.InOp: found := tr.planInOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.LikeOp: found := tr.planLikeOp(ctx, cmp) - return nil, found, nil + return nil, found } - return nil, false, nil + return nil, false } func (tr *ShardedRouting) planIsExpr(ctx *plancontext.PlanningContext, node *sqlparser.IsExpr) bool { diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index a401b29074d..279669ade38 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,8 +33,8 @@ import ( // outer query through a join. type SubQuery struct { // Fields filled in at the time of construction: - Outer ops.Operator // Outer query operator. - Subquery ops.Operator // Subquery operator. + Outer Operator // Outer query operator. + Subquery Operator // Subquery operator. FilterType opcode.PulloutOpcode // Type of subquery filter. 
Original sqlparser.Expr // This is the expression we should use if we can merge the inner to the outer originalSubquery *sqlparser.Subquery // Subquery representation, e.g., (SELECT foo from user LIMIT 1). @@ -54,7 +53,7 @@ type SubQuery struct { IsProjection bool } -func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) Operator { sq.Vars = make(map[string]int) columns, err := sq.GetJoinColumns(ctx, sq.Outer) if err != nil { @@ -69,24 +68,24 @@ func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { return nil } -func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer ops.Operator) (result []*sqlparser.ColName, err error) { +func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer Operator) (result []*sqlparser.ColName) { joinColumns, err := sq.GetJoinColumns(ctx, outer) if err != nil { - return nil, err + return nil } for _, jc := range joinColumns { for _, lhsExpr := range jc.LHSExprs { col, ok := lhsExpr.Expr.(*sqlparser.ColName) if !ok { - return nil, vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr)) + panic(vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr))) } result = append(result, col) } } - return result, nil + return result } -func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.Operator) ([]JoinColumn, error) { +func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer Operator) ([]JoinColumn, error) { if outer == nil { return nil, vterrors.VT13001("outer operator cannot be nil") } @@ -98,7 +97,7 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.O } sq.outerID = outerID mapper := func(in sqlparser.Expr) (JoinColumn, error) { - return breakExpressionInLHSandRHSForApplyJoin(ctx, in, outerID) + return 
breakExpressionInLHSandRHSForApplyJoin(ctx, in, outerID), nil } joinPredicates, err := slice.MapWithError(sq.Predicates, mapper) if err != nil { @@ -109,7 +108,7 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.O } // Clone implements the Operator interface -func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { +func (sq *SubQuery) Clone(inputs []Operator) Operator { klone := *sq switch len(inputs) { case 1: @@ -126,21 +125,21 @@ func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { return &klone } -func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sq.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sq *SubQuery) Inputs() []ops.Operator { +func (sq *SubQuery) Inputs() []Operator { if sq.Outer == nil { - return []ops.Operator{sq.Subquery} + return []Operator{sq.Subquery} } - return []ops.Operator{sq.Outer, sq.Subquery} + return []Operator{sq.Outer, sq.Subquery} } // SetInputs implements the Operator interface -func (sq *SubQuery) SetInputs(inputs []ops.Operator) { +func (sq *SubQuery) SetInputs(inputs []Operator) { switch len(inputs) { case 1: sq.Subquery = inputs[0] @@ -168,7 +167,7 @@ func (sq *SubQuery) ShortDescription() string { return fmt.Sprintf("%s %v%s", typ, sq.FilterType.String(), pred) } -func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { sq.Outer = sq.Outer.AddPredicate(ctx, expr) return sq } @@ -197,7 +196,7 @@ func (sq *SubQuery) GetMergePredicates() []sqlparser.Expr { return sq.Predicates } -func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer Operator) (Operator, error) { if !sq.TopLevel 
{ return nil, subqueryNotAtTopErr } @@ -215,7 +214,7 @@ func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) var correlatedSubqueryErr = vterrors.VT12001("correlated subquery is only supported for EXISTS") var subqueryNotAtTopErr = vterrors.VT12001("unmergable subquery can not be inside complex expression") -func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer Operator) (Operator, error) { if len(sq.Predicates) > 0 { if sq.FilterType != opcode.PulloutExists { return nil, correlatedSubqueryErr @@ -282,22 +281,8 @@ func (sq *SubQuery) isMerged(ctx *plancontext.PlanningContext) bool { } // mapExpr rewrites all expressions according to the provided function -func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) (sqlparser.Expr, error)) error { - newPredicates, err := slice.MapWithError(sq.Predicates, f) - if err != nil { - return err - } - sq.Predicates = newPredicates - - sq.Original, err = f(sq.Original) - if err != nil { - return err - } - - originalSubquery, err := f(sq.originalSubquery) - if err != nil { - return err - } - sq.originalSubquery = originalSubquery.(*sqlparser.Subquery) - return nil +func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) sqlparser.Expr) { + sq.Predicates = slice.Map(sq.Predicates, f) + sq.Original = f(sq.Original) + sq.originalSubquery = f(sq.originalSubquery).(*sqlparser.Subquery) } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go index 1d1d12bbfe3..b2de19408b4 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_builder.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_builder.go @@ -19,7 +19,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" 
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -32,7 +31,7 @@ type SubQueryBuilder struct { outerID semantics.TableSet } -func (sqb *SubQueryBuilder) getRootOperator(op ops.Operator, decorator func(operator ops.Operator) ops.Operator) ops.Operator { +func (sqb *SubQueryBuilder) getRootOperator(op Operator, decorator func(operator Operator) Operator) Operator { if len(sqb.Inner) == 0 { return op } @@ -53,19 +52,16 @@ func (sqb *SubQueryBuilder) handleSubquery( ctx *plancontext.PlanningContext, expr sqlparser.Expr, outerID semantics.TableSet, -) (*SubQuery, error) { +) *SubQuery { subq, parentExpr := getSubQuery(expr) if subq == nil { - return nil, nil + return nil } argName := ctx.GetReservedArgumentFor(subq) - sqInner, err := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) - if err != nil { - return nil, err - } + sqInner := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) sqb.Inner = append(sqb.Inner, sqInner) - return sqInner, nil + return sqInner } func getSubQuery(expr sqlparser.Expr) (subqueryExprExists *sqlparser.Subquery, parentExpr sqlparser.Expr) { @@ -99,7 +95,7 @@ func createSubqueryOp( subq *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { switch parent := parent.(type) { case *sqlparser.NotExpr: switch parent.Expr.(type) { @@ -120,20 +116,14 @@ func createSubqueryOp( // and extracts subqueries into operators func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []JoinColumn) { switch stmt := stmt.(type) { case *sqlparser.Select: return sqb.inspectSelect(ctx, stmt) case *sqlparser.Union: - exprs1, cols1, err := sqb.inspectStatement(ctx, stmt.Left) - if err != nil { - return nil, nil, err - } - exprs2, cols2, err := sqb.inspectStatement(ctx, stmt.Right) - if err != nil { - return nil, nil, err - } - 
return append(exprs1, exprs2...), append(cols1, cols2...), nil + exprs1, cols1 := sqb.inspectStatement(ctx, stmt.Left) + exprs2, cols2 := sqb.inspectStatement(ctx, stmt.Right) + return append(exprs1, exprs2...), append(cols1, cols2...) } panic("unknown type") } @@ -144,22 +134,12 @@ func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, func (sqb *SubQueryBuilder) inspectSelect( ctx *plancontext.PlanningContext, sel *sqlparser.Select, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []JoinColumn) { // first we need to go through all the places where one can find predicates // and search for subqueries - newWhere, wherePreds, whereJoinCols, err := sqb.inspectWhere(ctx, sel.Where) - if err != nil { - return nil, nil, err - } - newHaving, havingPreds, havingJoinCols, err := sqb.inspectWhere(ctx, sel.Having) - if err != nil { - return nil, nil, err - } - - newFrom, onPreds, onJoinCols, err := sqb.inspectOnExpr(ctx, sel.From) - if err != nil { - return nil, nil, err - } + newWhere, wherePreds, whereJoinCols := sqb.inspectWhere(ctx, sel.Where) + newHaving, havingPreds, havingJoinCols := sqb.inspectWhere(ctx, sel.Having) + newFrom, onPreds, onJoinCols := sqb.inspectOnExpr(ctx, sel.From) // then we use the updated AST structs to build the operator // these AST elements have any subqueries replace by arguments @@ -168,8 +148,7 @@ func (sqb *SubQueryBuilder) inspectSelect( sel.From = newFrom return append(append(wherePreds, havingPreds...), onPreds...), - append(append(whereJoinCols, havingJoinCols...), onJoinCols...), - nil + append(append(whereJoinCols, havingJoinCols...), onJoinCols...) 
} func createSubquery( @@ -181,7 +160,7 @@ func createSubquery( argName string, filterType opcode.PulloutOpcode, isProjection bool, -) (*SubQuery, error) { +) *SubQuery { topLevel := ctx.SemTable.EqualsExpr(original, parent) original = cloneASTAndSemState(ctx, original) originalSq := cloneASTAndSemState(ctx, subq) @@ -189,20 +168,13 @@ func createSubquery( totalID := subqID.Merge(outerID) sqc := &SubQueryBuilder{totalID: totalID, subqID: subqID, outerID: outerID} - predicates, joinCols, err := sqc.inspectStatement(ctx, subq.Select) - if err != nil { - return nil, err - } - + predicates, joinCols := sqc.inspectStatement(ctx, subq.Select) stmt := rewriteRemainingColumns(ctx, subq.Select, subqID) // TODO: this should not be needed. We are using CopyOnRewrite above, but somehow this is not getting copied ctx.SemTable.CopySemanticInfo(subq.Select, stmt) - opInner, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } + opInner := translateQueryToOp(ctx, stmt) opInner = sqc.getRootOperator(opInner, nil) return &SubQuery{ @@ -215,15 +187,15 @@ func createSubquery( IsProjection: isProjection, TopLevel: topLevel, JoinColumns: joinCols, - }, nil + } } func (sqb *SubQueryBuilder) inspectWhere( ctx *plancontext.PlanningContext, in *sqlparser.Where, -) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn, error) { +) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn) { if in == nil { - return nil, nil, nil, nil + return nil, nil, nil } jpc := &joinPredicateCollector{ totalID: sqb.totalID, @@ -232,16 +204,11 @@ func (sqb *SubQueryBuilder) inspectWhere( } for _, predicate := range sqlparser.SplitAndExpression(nil, in.Expr) { sqlparser.RemoveKeyspaceFromColName(predicate) - subq, err := sqb.handleSubquery(ctx, predicate, sqb.totalID) - if err != nil { - return nil, nil, nil, err - } + subq := sqb.handleSubquery(ctx, predicate, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, predicate); err != nil { - return nil, nil, nil, err - } + 
jpc.inspectPredicate(ctx, predicate) } if len(jpc.remainingPredicates) == 0 { @@ -250,13 +217,13 @@ func (sqb *SubQueryBuilder) inspectWhere( in.Expr = sqlparser.AndExpressions(jpc.remainingPredicates...) } - return in, jpc.predicates, jpc.joinColumns, nil + return in, jpc.predicates, jpc.joinColumns } func (sqb *SubQueryBuilder) inspectOnExpr( ctx *plancontext.PlanningContext, from []sqlparser.TableExpr, -) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn, err error) { +) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn) { for _, tbl := range from { tbl := sqlparser.CopyOnRewrite(tbl, dontEnterSubqueries, func(cursor *sqlparser.CopyOnWriteCursor) { cond, ok := cursor.Node().(*sqlparser.JoinCondition) @@ -271,20 +238,11 @@ func (sqb *SubQueryBuilder) inspectOnExpr( } for _, pred := range sqlparser.SplitAndExpression(nil, cond.On) { - subq, innerErr := sqb.handleSubquery(ctx, pred, sqb.totalID) - if err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + subq := sqb.handleSubquery(ctx, pred, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, pred); err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + jpc.inspectPredicate(ctx, pred) } if len(jpc.remainingPredicates) == 0 { cond.On = nil @@ -294,9 +252,6 @@ func (sqb *SubQueryBuilder) inspectOnExpr( onPreds = append(onPreds, jpc.predicates...) onJoinCols = append(onJoinCols, jpc.joinColumns...) 
}, ctx.SemTable.CopySemanticInfo) - if err != nil { - return - } newFrom = append(newFrom, tbl.(sqlparser.TableExpr)) } return @@ -309,7 +264,7 @@ func createComparisonSubQuery( subFromOutside *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { subq, outside := semantics.GetSubqueryAndOtherSide(parent) if outside == nil || subq != subFromOutside { panic("uh oh") @@ -323,10 +278,7 @@ func createComparisonSubQuery( filterType = opcode.PulloutNotIn } - subquery, err := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) - if err != nil { - return nil, err - } + subquery := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) // if we are comparing with a column from the inner subquery, // we add this extra predicate to check if the two sides are mergable or not @@ -338,7 +290,7 @@ func createComparisonSubQuery( } } - return subquery, err + return subquery } func (sqb *SubQueryBuilder) pullOutValueSubqueries( @@ -346,25 +298,22 @@ func (sqb *SubQueryBuilder) pullOutValueSubqueries( expr sqlparser.Expr, outerID semantics.TableSet, isDML bool, -) (sqlparser.Expr, []*SubQuery, error) { +) (sqlparser.Expr, []*SubQuery) { original := sqlparser.CloneExpr(expr) sqe := extractSubQueries(ctx, expr, isDML) if sqe == nil { - return nil, nil, nil + return nil, nil } var newSubqs []*SubQuery for idx, subq := range sqe.subq { - sqInner, err := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) - if err != nil { - return nil, nil, err - } + sqInner := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) newSubqs = append(newSubqs, sqInner) } sqb.Inner = append(sqb.Inner, newSubqs...) 
- return sqe.new, newSubqs, nil + return sqe.new, newSubqs } type subqueryExtraction struct { diff --git a/go/vt/vtgate/planbuilder/operators/subquery_container.go b/go/vt/vtgate/planbuilder/operators/subquery_container.go index ab8d1104623..e4feeab49d8 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_container.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_container.go @@ -18,7 +18,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -27,15 +26,15 @@ type ( // The inner subqueries can be executed in any order, so we store them like this so we can see more opportunities // for merging SubQueryContainer struct { - Outer ops.Operator + Outer Operator Inner []*SubQuery } ) -var _ ops.Operator = (*SubQueryContainer)(nil) +var _ Operator = (*SubQueryContainer)(nil) // Clone implements the Operator interface -func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { +func (sqc *SubQueryContainer) Clone(inputs []Operator) Operator { result := &SubQueryContainer{ Outer: inputs[0], } @@ -49,13 +48,13 @@ func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { return result } -func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sqc.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sqc *SubQueryContainer) Inputs() []ops.Operator { - operators := []ops.Operator{sqc.Outer} +func (sqc *SubQueryContainer) Inputs() []Operator { + operators := []Operator{sqc.Outer} for _, inner := range sqc.Inner { operators = append(operators, inner) } @@ -63,7 +62,7 @@ func (sqc *SubQueryContainer) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (sqc *SubQueryContainer) SetInputs(ops []ops.Operator) { +func (sqc 
*SubQueryContainer) SetInputs(ops []Operator) { sqc.Outer = ops[0] } @@ -71,7 +70,7 @@ func (sqc *SubQueryContainer) ShortDescription() string { return "" } -func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { sqc.Outer = sqc.Outer.AddPredicate(ctx, expr) return sqc } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index 74761aef5c5..ed0c6bde941 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -26,13 +26,11 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool { +func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op Operator) bool { validVindex := func(expr sqlparser.Expr) bool { sc := findColumnVindex(ctx, op, expr) return sc != nil && sc.IsUnique() @@ -74,24 +72,24 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme } } -func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) ops.Operator { - visit := func(op ops.Operator, lhsTables semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func settleSubqueries(ctx *plancontext.PlanningContext, op Operator) Operator { + visit := func(op Operator, lhsTables semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch op := op.(type) { case *SubQueryContainer: outer := op.Outer for 
_, subq := range op.Inner { newOuter, err := subq.settle(ctx, outer) if err != nil { - return nil, nil, err + panic(err) } subq.Outer = newOuter outer = subq } - return outer, rewrite.NewTree("extracted subqueries from subquery container"), nil + return outer, Rewrote("extracted subqueries from subquery container") case *Projection: ap, err := op.GetAliasedProjections() if err != nil { - return nil, nil, err + panic(err) } for _, pe := range ap { @@ -102,13 +100,10 @@ func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) ops.Ope mergeSubqueryExpr(ctx, setExpr.Expr) } } - return op, rewrite.SameTree, nil + return op, NoRewrite } - op, err := rewrite.BottomUp(op, TableID, visit, nil) - if err != nil { - panic(err) - } - return op + + return BottomUp(op, TableID, visit, nil) } func mergeSubqueryExpr(ctx *plancontext.PlanningContext, pe *ProjExpr) { @@ -194,7 +189,7 @@ func tryPushSubQueryInJoin( ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { lhs := TableID(outer.LHS) rhs := TableID(outer.RHS) joinID := TableID(outer) @@ -213,12 +208,9 @@ func tryPushSubQueryInJoin( // in general, we don't want to push down uncorrelated subqueries into the RHS of a join, // since this side is executed once per row from the LHS, so we would unnecessarily execute // the subquery multiple times. The exception is if we can merge the subquery with the RHS of the join. - merged, result, err := tryMergeWithRHS(ctx, inner, outer) - if err != nil { - return nil, nil, err - } + merged, result := tryMergeWithRHS(ctx, inner, outer) if merged != nil { - return merged, result, nil + return merged, result } _, ok := inner.Subquery.(*Projection) @@ -227,41 +219,37 @@ func tryPushSubQueryInJoin( // Projections are easy to push down, so if this is still at the top, // it means we have not tried pushing it yet. 
// Let's give it a chance to push down before we push it on the left - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(lhs) { // we can safely push down the subquery on the LHS outer.LHS = addSubQuery(outer.LHS, inner) - return outer, rewrite.NewTree("push subquery into LHS of join"), nil + return outer, Rewrote("push subquery into LHS of join") } if outer.LeftJoin || len(inner.Predicates) == 0 { // we can't push any filters on the RHS of an outer join, and // we don't want to push uncorrelated subqueries to the RHS of a join - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(rhs) { // we can push down the subquery filter on RHS of the join outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join"), nil + return outer, Rewrote("push subquery into RHS of join") } if deps.IsSolvedBy(joinID) { // we can rewrite the predicate to not use the values from the lhs, // and instead use arguments for these dependencies. 
// this way we can push the subquery into the RHS of this join - err := inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) - if err != nil { - return nil, nil, err - } - + inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join rewriting predicates"), nil + return outer, Rewrote("push subquery into RHS of join rewriting predicates") } - return nil, rewrite.SameTree, nil + return nil, NoRewrite } // extractLHSExpr will return a function that extracts any ColName coming from the LHS table, @@ -270,37 +258,34 @@ func extractLHSExpr( ctx *plancontext.PlanningContext, outer *ApplyJoin, lhs semantics.TableSet, -) func(expr sqlparser.Expr) (sqlparser.Expr, error) { - return func(expr sqlparser.Expr) (sqlparser.Expr, error) { - col, err := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, lhs) - if err != nil { - return nil, err - } +) func(expr sqlparser.Expr) sqlparser.Expr { + return func(expr sqlparser.Expr) sqlparser.Expr { + col := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, lhs) if col.IsPureLeft() { - return nil, vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here") + panic(vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here")) } for _, bve := range col.LHSExprs { if !outer.isColNameMovedFromL2R(bve.Name) { outer.ExtraLHSVars = append(outer.ExtraLHSVars, bve) } } - return col.RHSExpr, nil + return col.RHSExpr } } // tryMergeWithRHS attempts to merge a subquery with the RHS of a join -func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (Operator, *ApplyResult) { if outer.LeftJoin { - return nil, nil, nil + return nil, nil } // both sides need to be routes outerRoute, ok := outer.RHS.(*Route) if !ok { - 
return nil, nil, nil + return nil, nil } innerRoute, ok := inner.Subquery.(*Route) if !ok { - return nil, nil, nil + return nil, nil } newExpr := rewriteOriginalPushedToRHS(ctx, inner.Original, outer) @@ -311,18 +296,18 @@ func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *A } newOp := mergeSubqueryInputs(ctx, innerRoute, outerRoute, inner.GetMergePredicates(), sqm) if newOp == nil { - return nil, nil, nil + return nil, nil } outer.RHS = newOp ctx.MergedSubqueries = append(ctx.MergedSubqueries, inner.originalSubquery) - return outer, rewrite.NewTree("merged subquery with rhs of join"), nil + return outer, Rewrote("merged subquery with rhs of join") } // addSubQuery adds a SubQuery to the given operator. If the operator is a SubQueryContainer, // it will add the SubQuery to the SubQueryContainer. If the operator is something else, it will // create a new SubQueryContainer with the given operator as the outer and the SubQuery as the inner. -func addSubQuery(in ops.Operator, inner *SubQuery) ops.Operator { +func addSubQuery(in Operator, inner *SubQuery) Operator { sql, ok := in.(*SubQueryContainer) if !ok { return &SubQueryContainer{ @@ -364,10 +349,10 @@ func rewriteOriginalPushedToRHS(ctx *plancontext.PlanningContext, expression sql return result.(sqlparser.Expr) } -func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { +func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } outer := TableID(src.Outer) @@ -378,7 +363,7 @@ func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Project } if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil + return p, NoRewrite } if se, ok := 
pe.Info.(SubQueryExpression); ok { @@ -387,7 +372,7 @@ func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Project } // all projections can be pushed to the outer src.Outer, p.Source = p, src.Outer - return src, rewrite.NewTree("push projection into outer side of subquery container"), nil + return src, Rewrote("push projection into outer side of subquery container") } func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Expr, se SubQueryExpression, subqueries ...*SubQuery) sqlparser.Expr { @@ -433,19 +418,16 @@ func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Exp return result.(sqlparser.Expr) } -func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (Operator, *ApplyResult) { if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } var remaining []*SubQuery - var result *rewrite.ApplyResult + var result *ApplyResult for _, inner := range in.Inner { - newOuter, _result, err := pushOrMerge(ctx, in.Outer, inner) - if err != nil { - return nil, nil, err - } - if _result == rewrite.SameTree { + newOuter, _result := pushOrMerge(ctx, in.Outer, inner) + if _result == NoRewrite { remaining = append(remaining, inner) continue } @@ -455,26 +437,26 @@ func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQuery } if len(remaining) == 0 { - return in.Outer, result, nil + return in.Outer, result } in.Inner = remaining - return in, result, nil + return in, result } func tryMergeSubQuery( ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, -) (newOuter ops.Operator, result *rewrite.ApplyResult, err error) { +) (newOuter Operator, result *ApplyResult) { switch inner := subQuery.Subquery.(type) { case *Route: return tryMergeSubqueryWithOuter(ctx, subQuery, 
outer, inner) case *SubQueryContainer: return tryMergeSubqueriesRecursively(ctx, subQuery, outer, inner) } - return outer, rewrite.SameTree, nil + return outer, NoRewrite } // tryMergeSubqueriesRecursively attempts to merge a SubQueryContainer with the outer Route. @@ -483,7 +465,7 @@ func tryMergeSubqueriesRecursively( subQuery *SubQuery, outer *Route, inner *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { exprs := subQuery.GetMergePredicates() merger := &subqueryRouteMerger{ outer: outer, @@ -492,32 +474,29 @@ func tryMergeSubqueriesRecursively( } op := mergeSubqueryInputs(ctx, inner.Outer, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } op = Clone(op).(*Route) op.Source = outer.Source - var finalResult *rewrite.ApplyResult + var finalResult *ApplyResult for _, subq := range inner.Inner { - newOuter, res, err := tryMergeSubQuery(ctx, subq, op) - if err != nil { - return nil, nil, err - } - if res == rewrite.SameTree { + newOuter, res := tryMergeSubQuery(ctx, subq, op) + if res == NoRewrite { // we failed to merge one of the inners - we need to abort - return nil, rewrite.SameTree, nil + return nil, NoRewrite } op = newOuter.(*Route) finalResult = finalResult.Merge(res) } op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} - return op, finalResult.Merge(rewrite.NewTree("merge outer of two subqueries")), nil + return op, finalResult.Merge(Rewrote("merge outer of two subqueries")) } -func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner ops.Operator) (ops.Operator, *rewrite.ApplyResult, error) { +func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner Operator) (Operator, *ApplyResult) { if updOp, ok := outer.Source.(*Update); ok && mergingIsBlocked(subQuery, updOp) { - return outer, rewrite.SameTree, nil + return outer, 
NoRewrite } exprs := subQuery.GetMergePredicates() merger := &subqueryRouteMerger{ @@ -527,13 +506,13 @@ func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQu } op := mergeSubqueryInputs(ctx, inner, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } if !subQuery.IsProjection { op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} } ctx.MergedSubqueries = append(ctx.MergedSubqueries, subQuery.originalSubquery) - return op, rewrite.NewTree("merged subquery with outer"), nil + return op, Rewrote("merged subquery with outer") } // This checked if subquery is part of the changed vindex values. Subquery cannot be merged with the outer route. @@ -546,21 +525,18 @@ func mergingIsBlocked(subQuery *SubQuery, updOp *Update) bool { return false } -func pushOrMerge(ctx *plancontext.PlanningContext, outer ops.Operator, inner *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMerge(ctx *plancontext.PlanningContext, outer Operator, inner *SubQuery) (Operator, *ApplyResult) { switch o := outer.(type) { case *Route: return tryMergeSubQuery(ctx, inner, o) case *ApplyJoin: - join, applyResult, err := tryPushSubQueryInJoin(ctx, inner, o) - if err != nil { - return nil, nil, err - } + join, applyResult := tryPushSubQueryInJoin(ctx, inner, o) if join == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } - return join, applyResult, nil + return join, applyResult default: - return outer, rewrite.SameTree, nil + return outer, NoRewrite } } @@ -618,10 +594,7 @@ func (s *subqueryRouteMerger) mergeShardedRouting(ctx *plancontext.PlanningConte }) } - routing, err := tr.resetRoutingLogic(ctx) - if err != nil { - panic(err) - } + routing := tr.resetRoutingLogic(ctx) return s.merge(ctx, old1, old2, routing) } @@ -637,7 +610,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out } } _, isSharded := 
r.(*ShardedRouting) - var src ops.Operator + var src Operator if isSharded { src = s.outer.Source if !s.subq.IsProjection { @@ -665,7 +638,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out // we should be able to use this method for all plan types, // but using this method for sharded queries introduces bugs // We really need to figure out why this is not working as expected -func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) ops.Operator { +func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) Operator { src := s.outer.Source stmt, _, err := ToSQL(ctx, inner.Source) if err != nil { @@ -726,7 +699,7 @@ func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningCont // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. // These rules are similar but different from join merging -func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out ops.Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { +func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { inRoute, outRoute := operatorsToRoutes(in, out) if inRoute == nil || outRoute == nil { return nil diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index e731ec54201..93b406232b2 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -43,7 +42,7 @@ type ( ) // Clone 
implements the Operator interface -func (to *Table) Clone([]ops.Operator) ops.Operator { +func (to *Table) Clone([]Operator) Operator { var columns []*sqlparser.ColName for _, name := range to.Columns { columns = append(columns, sqlparser.CloneRefOfColName(name)) @@ -61,7 +60,7 @@ func (to *Table) introducesTableID() semantics.TableSet { } // AddPredicate implements the PhysicalOperator interface -func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return newFilter(to, expr) } @@ -92,7 +91,7 @@ func (to *Table) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return transformColumnsToSelectExprs(ctx, to) } -func (to *Table) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (to *Table) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/ops/to_json.go b/go/vt/vtgate/planbuilder/operators/to_json.go similarity index 98% rename from go/vt/vtgate/planbuilder/operators/ops/to_json.go rename to go/vt/vtgate/planbuilder/operators/to_json.go index 2b8b747f433..48b7fa9a247 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/to_json.go +++ b/go/vt/vtgate/planbuilder/operators/to_json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package ops +package operators import ( "fmt" diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go index b3d866a00a3..454a6370c2f 100644 --- a/go/vt/vtgate/planbuilder/operators/union.go +++ b/go/vt/vtgate/planbuilder/operators/union.go @@ -23,12 +23,11 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Union struct { - Sources []ops.Operator + Sources []Operator // These are the select expressions coming from each source Selects []sqlparser.SelectExprs @@ -38,7 +37,7 @@ type Union struct { unionColumnsAsAlisedExprs []*sqlparser.AliasedExpr } -func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { +func newUnion(srcs []Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { if columns == nil { panic("rt") } @@ -51,24 +50,24 @@ func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, column } // Clone implements the Operator interface -func (u *Union) Clone(inputs []ops.Operator) ops.Operator { +func (u *Union) Clone(inputs []Operator) Operator { newOp := *u newOp.Sources = inputs newOp.Selects = slices.Clone(u.Selects) return &newOp } -func (u *Union) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Union) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (u *Union) Inputs() []ops.Operator { +func (u *Union) Inputs() []Operator { return u.Sources } // SetInputs implements the Operator interface -func (u *Union) SetInputs(ops []ops.Operator) { +func (u *Union) SetInputs(ops []Operator) { u.Sources = ops } @@ -93,7 +92,7 @@ Notice how `X.col = 42` has been translated to `foo = 42` and `id = 42` on respe The first 
SELECT of the union dictates the column names, and the second is whatever expression can be found on the same offset. The names of the RHS are discarded. */ -func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { offsets := make(map[string]int) sel, err := u.GetSelectFor(0) if err != nil { diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go index 03b7a212893..953d779c6a1 100644 --- a/go/vt/vtgate/planbuilder/operators/union_merging.go +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -19,14 +19,12 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeUnionInputInAnyOrder merges sources the sources of the union in any order // can be used for UNION DISTINCT -func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, []sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects @@ -43,10 +41,7 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } selA := selects[idx] selB := selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[idx] = newPlan selects[idx] = sel @@ -57,10 +52,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } } if !merged { - return sources, selects, nil + return sources, selects } - 
var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs for i, source := range sources { if keep[i] || i <= idx { @@ -73,10 +68,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o selects = newSelects } - return sources, selects, nil + return sources, selects } -func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, []sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects for { @@ -85,10 +80,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops j := i + 1 srcA, selA := sources[i], selects[i] srcB, selB := sources[j], selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[i] = newPlan selects[i] = sel @@ -102,7 +94,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops } } - return sources, selects, nil + return sources, selects } // mergeUnionInputs checks whether two operators can be merged into a single one. 
@@ -111,13 +103,13 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops // this function is very similar to mergeJoinInputs func mergeUnionInputs( ctx *plancontext.PlanningContext, - lhs, rhs ops.Operator, + lhs, rhs Operator, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { - return nil, nil, nil + return nil, nil } switch { @@ -134,12 +126,12 @@ func mergeUnionInputs( return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) case a == sharded && b == sharded && sameKeyspace: - res, exprs, err := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) - if err != nil || res != nil { - return res, exprs, err + res, exprs := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) + if res != nil { + return res, exprs } } - return nil, nil, nil + return nil, nil } func tryMergeUnionShardedRouting( @@ -147,7 +139,7 @@ func tryMergeUnionShardedRouting( routeA, routeB *Route, exprsA, exprsB sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { tblA := routeA.Routing.(*ShardedRouting) tblB := routeB.Routing.(*ShardedRouting) @@ -173,7 +165,7 @@ func tryMergeUnionShardedRouting( } } - return nil, nil, nil + return nil, nil } func createMergedUnion( @@ -181,7 +173,7 @@ func createMergedUnion( lhsRoute, rhsRoute *Route, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, - routing Routing) (ops.Operator, sqlparser.SelectExprs, error) { + routing Routing) (Operator, sqlparser.SelectExprs) { // if there are `*` on either side, or a different number of SelectExpr items, // we give up aligning the expressions and trust that we can push everything down @@ -210,16 +202,16 @@ func 
createMergedUnion( ctx.SemTable.Recursive[col] = deps } - union := newUnion([]ops.Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) + union := newUnion([]Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) selectExprs := unionSelects(lhsExprs) return &Route{ Source: union, MergedWith: []*Route{rhsRoute}, Routing: routing, - }, selectExprs, nil + }, selectExprs } -func compactUnion(u *Union) *rewrite.ApplyResult { +func compactUnion(u *Union) *ApplyResult { if u.distinct { // first we remove unnecessary DISTINCTs for idx, source := range u.Sources { @@ -231,7 +223,7 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } } - var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs merged := false @@ -250,10 +242,10 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } if !merged { - return rewrite.SameTree + return NoRewrite } u.Sources = newSources u.Selects = newSelects - return rewrite.NewTree("merged UNIONs") + return Rewrote("merged UNIONs") } diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index 8868e83c247..ccfdddc3ea9 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -71,14 +70,14 @@ func (u *Update) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (u *Update) Clone([]ops.Operator) ops.Operator { +func (u *Update) Clone([]Operator) Operator { upd := *u upd.Assignments = slices.Clone(u.Assignments) upd.ChangedVindexValues = 
maps.Clone(u.ChangedVindexValues) return &upd } -func (u *Update) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Update) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -100,51 +99,39 @@ func (u *Update) ShortDescription() string { return strings.Join(s, " ") } -func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - if err != nil { - return nil, err - } +func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) Operator { + tableInfo, qt := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") - if err != nil { - return nil, err - } + vindexTable, routing := buildVindexTableForDML(ctx, tableInfo, qt, "update") updClone := sqlparser.CloneRefOfUpdate(updStmt) - updOp, err := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) - if err != nil { - return nil, err - } + updOp := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) parentFks := ctx.SemTable.GetParentForeignKeysList() childFks := ctx.SemTable.GetChildForeignKeysList() if len(childFks) == 0 && len(parentFks) == 0 { - return updOp, nil + return updOp } // If the delete statement has a limit, we don't support it yet. if updStmt.Limit != nil { - return nil, vterrors.VT12001("update with limit with foreign key constraints") + panic(vterrors.VT12001("update with limit with foreign key constraints")) } // Now we check if any of the foreign key columns that are being udpated have dependencies on other updated columns. // This is unsafe, and we currently don't support this in Vitess. 
- if err = ctx.SemTable.ErrIfFkDependentColumnUpdated(updStmt.Exprs); err != nil { - return nil, err + if err := ctx.SemTable.ErrIfFkDependentColumnUpdated(updStmt.Exprs); err != nil { + panic(err) } return buildFkOperator(ctx, updOp, updClone, parentFks, childFks, vindexTable) } -func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) (ops.Operator, error) { +func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) Operator { sqc := &SubQueryBuilder{} assignments := make([]SetExpr, len(updStmt.Exprs)) for idx, updExpr := range updStmt.Exprs { - expr, subqs, err := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true) - if err != nil { - return nil, err - } + expr, subqs := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true) if len(subqs) == 0 { expr = updExpr.Expr } @@ -158,10 +145,7 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U } } - vp, cvv, ovq, subQueriesArgOnChangedVindex, err := getUpdateVindexInformation(ctx, updStmt, vindexTable, qt.ID, assignments) - if err != nil { - return nil, err - } + vp, cvv, ovq, subQueriesArgOnChangedVindex := getUpdateVindexInformation(ctx, updStmt, vindexTable, qt.ID, assignments) tr, ok := routing.(*ShardedRouting) if ok { @@ -169,20 +153,15 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U } for _, predicate := range qt.Predicates { - if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { - return nil, err - } else if subq != nil { + if subq := sqc.handleSubquery(ctx, predicate, qt.ID); subq != nil { continue } - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { // TODO systay: 
we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + panic(vterrors.VT12001("multi shard UPDATE with LIMIT")) } route := &Route{ @@ -201,23 +180,20 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U Comments: updStmt.Comments, } - decorator := func(op ops.Operator) ops.Operator { + decorator := func(op Operator) Operator { return &LockAndComment{ Source: op, Lock: sqlparser.ShareModeLock, } } - return sqc.getRootOperator(route, decorator), nil + return sqc.getRootOperator(route, decorator) } -func buildFkOperator(ctx *plancontext.PlanningContext, updOp ops.Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { +func buildFkOperator(ctx *plancontext.PlanningContext, updOp Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) Operator { restrictChildFks, cascadeChildFks := splitChildFks(childFks) - op, err := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable) - if err != nil { - return nil, err - } + op := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable) return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks, updatedTable) } @@ -241,9 +217,9 @@ func splitChildFks(fks []vindexes.ChildFKInfo) (restrictChildFks, cascadeChildFk return } -func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { +func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) Operator { if len(childFks) == 0 { - return parentOp, nil + return parentOp } var fkChildren 
[]*FkChild @@ -252,7 +228,7 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, for _, fk := range childFks { // We should have already filtered out update restrict foreign keys. if fk.OnUpdate.IsRestrict() { - return nil, vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered") + panic(vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered")) } // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. @@ -275,23 +251,17 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, } } - fkChild, err := createFkChildForUpdate(ctx, fk, selectOffsets, nonLiteralUpdateInfo, updatedTable) - if err != nil { - return nil, err - } + fkChild := createFkChildForUpdate(ctx, fk, selectOffsets, nonLiteralUpdateInfo, updatedTable) fkChildren = append(fkChildren, fkChild) } - selectionOp, err := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, sqlparser.ForUpdateLockNoWait) - if err != nil { - return nil, err - } + selectionOp := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, sqlparser.ForUpdateLockNoWait) return &FkCascade{ Selection: selectionOp, Children: fkChildren, Parent: parentOp, - }, nil + } } // hasNonLiteralUpdate checks if any of the update expressions have a non-literal update. @@ -404,7 +374,7 @@ func getCastTypeForColumn(updatedTable *vindexes.Table, updExpr *sqlparser.Updat } // createFkChildForUpdate creates the update query operator for the child table based on the foreign key constraints. 
-func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, selectOffsets []int, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) (*FkChild, error) { +func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, selectOffsets []int, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) *FkChild { // Create a ValTuple of child column names var valTuple sqlparser.ValTuple for _, column := range fk.ChildColumns { @@ -426,18 +396,14 @@ func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildF } } - var childOp ops.Operator - var err error + var childOp Operator switch fk.OnUpdate { case sqlparser.Cascade: - childOp, err = buildChildUpdOpForCascade(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) + childOp = buildChildUpdOpForCascade(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) case sqlparser.SetNull: - childOp, err = buildChildUpdOpForSetNull(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) + childOp = buildChildUpdOpForSetNull(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) case sqlparser.SetDefault: - return nil, vterrors.VT09016() - } - if err != nil { - return nil, err + panic(vterrors.VT09016()) } return &FkChild{ @@ -445,14 +411,14 @@ func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildF Cols: selectOffsets, Op: childOp, NonLiteralInfo: nonLiteralUpdateInfo, - }, nil + } } // buildChildUpdOpForCascade builds the child update statement operator for the CASCADE type foreign key constraint. 
// The query looks like this - // // `UPDATE SET WHERE IN ()` -func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) (ops.Operator, error) { +func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) Operator { // The update expressions are the same as the update expressions in the parent update query // with the column names replaced with the child column names. var childUpdateExprs sqlparser.UpdateExprs @@ -501,7 +467,7 @@ func buildChildUpdOpForSetNull( childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table, -) (ops.Operator, error) { +) Operator { // For the SET NULL type constraint, we need to set all the child columns to NULL. var childUpdateExprs sqlparser.UpdateExprs for _, column := range fk.ChildColumns { @@ -557,23 +523,20 @@ func getParsedCommentsForFkChecks(ctx *plancontext.PlanningContext) (parsedComme // createFKVerifyOp creates the verify operator for the parent foreign key constraints. func createFKVerifyOp( ctx *plancontext.PlanningContext, - childOp ops.Operator, + childOp Operator, updStmt *sqlparser.Update, parentFks []vindexes.ParentFKInfo, restrictChildFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table, -) (ops.Operator, error) { +) Operator { if len(parentFks) == 0 && len(restrictChildFks) == 0 { - return childOp, nil + return childOp } var Verify []*VerifyOp // This validates that new values exists on the parent table. 
for _, fk := range parentFks { - op, err := createFkVerifyOpForParentFKForUpdate(ctx, updatedTable, updStmt, fk) - if err != nil { - return nil, err - } + op := createFkVerifyOpForParentFKForUpdate(ctx, updatedTable, updStmt, fk) Verify = append(Verify, &VerifyOp{ Op: op, Typ: engine.ParentVerify, @@ -581,10 +544,8 @@ func createFKVerifyOp( } // This validates that the old values don't exist on the child table. for _, fk := range restrictChildFks { - op, err := createFkVerifyOpForChildFKForUpdate(ctx, updatedTable, updStmt, fk) - if err != nil { - return nil, err - } + op := createFkVerifyOpForChildFKForUpdate(ctx, updatedTable, updStmt, fk) + Verify = append(Verify, &VerifyOp{ Op: op, Typ: engine.ChildVerify, @@ -594,7 +555,7 @@ func createFKVerifyOp( return &FkVerify{ Verify: Verify, Input: childOp, - }, nil + } } // Each parent foreign key constraint is verified by an anti join query of the form: @@ -608,11 +569,11 @@ func createFKVerifyOp( // where Parent.p1 is null and Parent.p2 is null and Child.id = 1 and Child.c2 + 1 is not null // and Child.c2 is not null and not ((Child.c1) <=> (Child.c2 + 1)) // limit 1 -func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) (ops.Operator, error) { +func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) Operator { childTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) childTbl, err := childTblExpr.TableName() if err != nil { - return nil, err + panic(err) } parentTbl := pFK.Table.GetTableName() var whereCond sqlparser.Expr @@ -708,16 +669,16 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, upda // verify query: // select 1 from Child join Parent on Parent.p1 = Child.c1 and Parent.p2 = Child.c2 // where Parent.id = 1 and ((Parent.col + 1) IS NULL OR (child.c1) NOT IN 
((Parent.col + 1))) limit 1 -func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) (ops.Operator, error) { +func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) Operator { // ON UPDATE RESTRICT foreign keys that require validation, should only be allowed in the case where we // are verifying all the FKs on vtgate level. if !ctx.VerifyAllFKs { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } parentTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) parentTbl, err := parentTblExpr.TableName() if err != nil { - return nil, err + panic(err) } childTbl := cFk.Table.GetTableName() var joinCond sqlparser.Expr diff --git a/go/vt/vtgate/planbuilder/operators/utils_test.go b/go/vt/vtgate/planbuilder/operators/utils_test.go index a7e25c1337c..596489150da 100644 --- a/go/vt/vtgate/planbuilder/operators/utils_test.go +++ b/go/vt/vtgate/planbuilder/operators/utils_test.go @@ -20,33 +20,32 @@ import ( "slices" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type fakeOp struct { id semantics.TableSet - inputs []ops.Operator + inputs []Operator cols []*sqlparser.AliasedExpr } -var _ ops.Operator = (*fakeOp)(nil) +var _ Operator = (*fakeOp)(nil) -func (f *fakeOp) Clone(inputs []ops.Operator) ops.Operator { +func (f *fakeOp) Clone(inputs []Operator) Operator { return f } -func (f *fakeOp) Inputs() []ops.Operator { +func (f *fakeOp) Inputs() []Operator { return f.inputs } -func (f *fakeOp) SetInputs(operators []ops.Operator) { +func (f *fakeOp) SetInputs(operators []Operator) { // TODO implement me panic("implement me") } -func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) 
ops.Operator { +func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // TODO implement me panic("implement me") } @@ -80,7 +79,7 @@ func (f *fakeOp) ShortDescription() string { panic("implement me") } -func (f *fakeOp) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (f *fakeOp) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { // TODO implement me panic("implement me") } diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go b/go/vt/vtgate/planbuilder/operators/vindex.go index 2fe2bf4d3e5..f8667b45aba 100644 --- a/go/vt/vtgate/planbuilder/operators/vindex.go +++ b/go/vt/vtgate/planbuilder/operators/vindex.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -57,7 +56,7 @@ func (v *Vindex) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (v *Vindex) Clone([]ops.Operator) ops.Operator { +func (v *Vindex) Clone([]Operator) Operator { clone := *v return &clone } @@ -101,7 +100,7 @@ func (v *Vindex) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return transformColumnsToSelectExprs(ctx, v) } -func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -121,7 +120,7 @@ func (v *Vindex) CheckValid() error { return nil } -func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { for _, e := range sqlparser.SplitAndExpression(nil, expr) { deps := ctx.SemTable.RecursiveDeps(e) if deps.NumberOfTables() > 1 { diff --git 
a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index a488dd1e470..b5c814b2ea6 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -41,7 +41,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtgate/engine" - oprewriters "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -261,7 +261,7 @@ func TestViews(t *testing.T) { } func TestOne(t *testing.T) { - reset := oprewriters.EnableDebugPrinting() + reset := operators.EnableDebugPrinting() defer reset() lv := loadSchema(t, "vschemas/schema.json", true) @@ -319,7 +319,7 @@ func TestOneWithUserAsDefault(t *testing.T) { } func TestOneWithTPCHVSchema(t *testing.T) { - reset := oprewriters.EnableDebugPrinting() + reset := operators.EnableDebugPrinting() defer reset() vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(t, "vschemas/tpch_schema.json", true), diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 44976815bd2..77c883325c5 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -225,7 +224,7 @@ func newBuildSelectPlan( return plan, operators.TablesUsed(op), nil } -func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (ops.Operator, error) { +func createSelectOperator(ctx *plancontext.PlanningContext, 
selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (operators.Operator, error) { err := queryRewrite(ctx.SemTable, reservedVars, selStmt) if err != nil { return nil, err From 645ae4749e980e90641eb5dc409ebb3e55ebc549 Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Mon, 4 Dec 2023 12:53:15 +0100 Subject: [PATCH 27/33] vtbackup: Fix copy pasta typo in option description (#14664) Signed-off-by: Dirkjan Bussink --- go/cmd/vtbackup/cli/vtbackup.go | 2 +- go/flags/endtoend/vtbackup.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go index 560e9d21b2a..d55cf643de4 100644 --- a/go/cmd/vtbackup/cli/vtbackup.go +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -208,7 +208,7 @@ func init() { Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket") Main.Flags().DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup") - Main.Flags().DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "how long to wait for mysqld startup") + Main.Flags().DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "how long to wait for mysqld shutdown") Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db") Main.Flags().BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal") Main.Flags().DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.") diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 3971472b74b..5fedbde91c6 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -174,7 +174,7 @@ Flags: --mycnf_slow_log_path string mysql 
slow query log path --mycnf_socket_file string mysql socket file --mycnf_tmp_dir string mysql tmp directory - --mysql-shutdown-timeout duration how long to wait for mysqld startup (default 5m0s) + --mysql-shutdown-timeout duration how long to wait for mysqld shutdown (default 5m0s) --mysql_port int mysql port (default 3306) --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --mysql_socket string path to the mysql socket From 69e65bd8bef07e64669ee4977b5b57ed2cc68d52 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Mon, 4 Dec 2023 06:56:53 -0500 Subject: [PATCH 28/33] VReplication: Properly Handle FK Constraints When Deferring Secondary Keys (#14543) Signed-off-by: Matt Lord --- .../tabletmanager/vreplication/vreplicator.go | 21 ++++++- .../vreplication/vreplicator_test.go | 59 ++++++++++++++++++- 2 files changed, 77 insertions(+), 3 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index c4df7e618d5..2a362645708 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -739,8 +739,27 @@ func (vr *vreplicator) getTableSecondaryKeys(ctx context.Context, tableName stri return nil, fmt.Errorf("could not determine CREATE TABLE statement from table schema %q", tableSchema) } - for _, index := range createTable.GetTableSpec().Indexes { + tableSpec := createTable.GetTableSpec() + fkIndexCols := make(map[string]bool) + for _, constraint := range tableSpec.Constraints { + if fkDef, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition); ok { + fkCols := make([]string, len(fkDef.Source)) + for i, fkCol := range fkDef.Source { + fkCols[i] = fkCol.Lowered() + } + fkIndexCols[strings.Join(fkCols, ",")] = true + } + } + for _, index := range tableSpec.Indexes { if index.Info.Type != sqlparser.IndexTypePrimary { + cols := make([]string, len(index.Columns)) + for i, col := 
range index.Columns { + cols[i] = col.Column.Lowered() + } + if fkIndexCols[strings.Join(cols, ",")] { + // This index is needed for a FK constraint so we cannot drop it. + continue + } secondaryKeys = append(secondaryKeys, index) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index aaa4a974191..dd4b9dc70f8 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -34,9 +34,10 @@ import ( "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/schemadiff" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/schemadiff" ) func TestRecalculatePKColsInfoByColumnNames(t *testing.T) { @@ -256,6 +257,7 @@ func TestDeferSecondaryKeys(t *testing.T) { wantStashErr string wantExecErr string expectFinalSchemaDiff bool + preStashHook func() error postStashHook func() error }{ { @@ -297,6 +299,54 @@ func TestDeferSecondaryKeys(t *testing.T) { actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2)", WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), }, + { + name: "2SK:1FK", + tableName: "t1", + initialDDL: "create table t1 (id int not null, c1 int default null, c2 int default null, t2_id int not null, primary key (id), key c1 (c1), key c2 (c2), foreign key (t2_id) references t2 (id))", + // Secondary key t2_id is needed to enforce the FK constraint so we do not drop it. 
+ strippedDDL: "create table t1 (id int not null, c1 int default null, c2 int default null, t2_id int not null, primary key (id), key t2_id (t2_id), constraint t1_ibfk_1 foreign key (t2_id) references t2 (id))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, c1 int not null, primary key (id))", 1) + return err + }, + }, + { + name: "3SK:2FK", + tableName: "t1", + initialDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key c1 (c1), key c2 (c2), foreign key (t2_id) references t2 (id), key c3 (c3), foreign key (t2_id2) references t2 (id2))", + // Secondary keys t2_id and t2_id2 are needed to enforce the FK constraint so we do not drop them. 
+ strippedDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key t2_id (t2_id), key t2_id2 (t2_id2), constraint t1_ibfk_1 foreign key (t2_id) references t2 (id), constraint t1_ibfk_2 foreign key (t2_id2) references t2 (id2))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2), add key c3 (c3)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, id2 int default null, c1 int not null, primary key (id), key (id2))", 1) + return err + }, + }, + { + name: "5SK:2FK_multi-column", + tableName: "t1", + initialDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key c1 (c1), key c2 (c2), key t2_cs (c1,c2), key t2_ids (t2_id,t2_id2), foreign key (t2_id,t2_id2) references t2 (id, id2), key c3 (c3), foreign key (c1, c2) references t2 (c1, c2))", + // Secondary keys t2_ids and t2_cs are needed to enforce the FK constraint so we do not drop them. 
+ strippedDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key t2_cs (c1,c2), key t2_ids (t2_id,t2_id2), constraint t1_ibfk_1 foreign key (t2_id, t2_id2) references t2 (id, id2), constraint t1_ibfk_2 foreign key (c1, c2) references t2 (c1, c2))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2), add key c3 (c3)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, id2 int not null, c1 int not null, c2 int not null, primary key (id,id2), key (c1,c2))", 1) + return err + }, + }, { name: "2tSK", tableName: "t1", @@ -425,6 +475,11 @@ func TestDeferSecondaryKeys(t *testing.T) { // MoveTables and Reshard workflows. vr.WorkflowType = tcase.WorkflowType + if tcase.preStashHook != nil { + err = tcase.preStashHook() + require.NoError(t, err, "error executing pre stash hook: %v", err) + } + // Create the table. _, err := dbClient.ExecuteFetch(tcase.initialDDL, 1) require.NoError(t, err) @@ -456,7 +511,7 @@ func TestDeferSecondaryKeys(t *testing.T) { if tcase.postStashHook != nil { err = tcase.postStashHook() - require.NoError(t, err) + require.NoError(t, err, "error executing post stash hook: %v", err) // We should still NOT have any secondary keys because there's still // a running controller/vreplicator in the copy phase. From 17a7e59db92bc99d1c36a8196299149ae5ecb4a9 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Mon, 4 Dec 2023 14:04:49 +0200 Subject: [PATCH 29/33] TableGC: speed up GC process via `RequestChecks()`. 
Utilized by Online DDL for artifact cleanup (#14431) Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- .../tabletmanager/tablegc/tablegc_test.go | 168 +++++++++++------- go/timer/suspendable_ticker.go | 20 ++- go/timer/suspendable_ticker_test.go | 144 +++++++++++++++ go/vt/vttablet/onlineddl/executor.go | 5 + go/vt/vttablet/tabletserver/gc/tablegc.go | 145 +++++++++------ .../vttablet/tabletserver/gc/tablegc_test.go | 8 +- go/vt/vttablet/tabletserver/tabletserver.go | 2 +- 7 files changed, 359 insertions(+), 133 deletions(-) create mode 100644 go/timer/suspendable_ticker_test.go diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index c6f7253c791..c21a4fe2d99 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/gc" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" @@ -128,13 +129,18 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func checkTableRows(t *testing.T, tableName string, expect int64) { +func getTableRows(t *testing.T, tableName string) int64 { require.NotEmpty(t, tableName) query := `select count(*) as c from %a` parsed := sqlparser.BuildParsedQuery(query, tableName) rs, err := primaryTablet.VttabletProcess.QueryTablet(parsed.Query, keyspaceName, true) require.NoError(t, err) count := rs.Named().Row().AsInt64("c", 0) + return count +} + +func checkTableRows(t *testing.T, tableName string, expect int64) { + count := getTableRows(t, tableName) assert.Equal(t, expect, count) } @@ -176,19 +182,18 @@ func validateTableDoesNotExist(t *testing.T, tableExpr string) { defer cancel() ticker := time.NewTicker(time.Second) - var foundTableName string - var exists bool - var err 
error + defer ticker.Stop() + for { + exists, foundTableName, err := tableExists(tableExpr) + require.NoError(t, err) + if !exists { + return + } select { case <-ticker.C: - exists, foundTableName, err = tableExists(tableExpr) - require.NoError(t, err) - if !exists { - return - } case <-ctx.Done(): - assert.NoError(t, ctx.Err(), "validateTableDoesNotExist timed out, table %v still exists (%v)", tableExpr, foundTableName) + assert.Failf(t, "validateTableDoesNotExist timed out, table %v still exists (%v)", tableExpr, foundTableName) return } } @@ -199,59 +204,78 @@ func validateTableExists(t *testing.T, tableExpr string) { defer cancel() ticker := time.NewTicker(time.Second) - var exists bool - var err error + defer ticker.Stop() + for { + exists, _, err := tableExists(tableExpr) + require.NoError(t, err) + if exists { + return + } select { case <-ticker.C: - exists, _, err = tableExists(tableExpr) - require.NoError(t, err) - if exists { - return - } case <-ctx.Done(): - assert.NoError(t, ctx.Err(), "validateTableExists timed out, table %v still does not exist", tableExpr) + assert.Failf(t, "validateTableExists timed out, table %v still does not exist", tableExpr) return } } } func validateAnyState(t *testing.T, expectNumRows int64, states ...schema.TableGCState) { - for _, state := range states { - expectTableToExist := true - searchExpr := "" - switch state { - case schema.HoldTableGCState: - searchExpr = `\_vt\_HOLD\_%` - case schema.PurgeTableGCState: - searchExpr = `\_vt\_PURGE\_%` - case schema.EvacTableGCState: - searchExpr = `\_vt\_EVAC\_%` - case schema.DropTableGCState: - searchExpr = `\_vt\_DROP\_%` - case schema.TableDroppedGCState: - searchExpr = `\_vt\_%` - expectTableToExist = false - default: - t.Log("Unknown state") - t.Fail() - } - exists, tableName, err := tableExists(searchExpr) - require.NoError(t, err) - - if exists { - if expectNumRows >= 0 { - checkTableRows(t, tableName, expectNumRows) + t.Run(fmt.Sprintf("validateAnyState: expectNumRows=%v, 
states=%v", expectNumRows, states), func(t *testing.T) { + timeout := gc.NextChecksIntervals[len(gc.NextChecksIntervals)-1] + 5*time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + // Attempt validation: + for _, state := range states { + expectTableToExist := true + searchExpr := "" + switch state { + case schema.HoldTableGCState: + searchExpr = `\_vt\_HOLD\_%` + case schema.PurgeTableGCState: + searchExpr = `\_vt\_PURGE\_%` + case schema.EvacTableGCState: + searchExpr = `\_vt\_EVAC\_%` + case schema.DropTableGCState: + searchExpr = `\_vt\_DROP\_%` + case schema.TableDroppedGCState: + searchExpr = `\_vt\_%` + expectTableToExist = false + default: + require.Failf(t, "unknown state", "%v", state) + } + exists, tableName, err := tableExists(searchExpr) + require.NoError(t, err) + + var foundRows int64 + if exists { + foundRows = getTableRows(t, tableName) + // Now that the table is validated, we can drop it (test cleanup) + dropTable(t, tableName) + } + t.Logf("=== exists: %v, tableName: %v, rows: %v", exists, tableName, foundRows) + if exists == expectTableToExist { + // expectNumRows < 0 means "don't care" + if expectNumRows < 0 || (expectNumRows == foundRows) { + // All conditions are met + return + } + } + } + select { + case <-ticker.C: + case <-ctx.Done(): + assert.Failf(t, "timeout in validateAnyState", " waiting for any of these states: %v, expecting rows: %v", states, expectNumRows) + return } - // Now that the table is validated, we can drop it - dropTable(t, tableName) - } - if exists == expectTableToExist { - // condition met - return } - } - assert.Failf(t, "could not match any of the states", "states=%v", states) + }) } // dropTable drops a table @@ -309,17 +333,22 @@ func TestHold(t *testing.T) { } func TestEvac(t *testing.T) { - populateTable(t) - query, tableName, err := schema.GenerateRenameStatement("t1", schema.EvacTableGCState, 
time.Now().UTC().Add(tableTransitionExpiration)) - assert.NoError(t, err) - - _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - assert.NoError(t, err) - - validateTableDoesNotExist(t, "t1") - - time.Sleep(tableTransitionExpiration / 2) - { + var tableName string + t.Run("setting up EVAC table", func(t *testing.T) { + populateTable(t) + var query string + var err error + query, tableName, err = schema.GenerateRenameStatement("t1", schema.EvacTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + assert.NoError(t, err) + + _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + assert.NoError(t, err) + + validateTableDoesNotExist(t, "t1") + }) + + t.Run("validating before expiration", func(t *testing.T) { + time.Sleep(tableTransitionExpiration / 2) // Table was created with +10s timestamp, so it should still exist if fastDropTable { // EVAC state is skipped in mysql 8.0.23 and beyond @@ -328,13 +357,14 @@ func TestEvac(t *testing.T) { validateTableExists(t, tableName) checkTableRows(t, tableName, 1024) } - } - - time.Sleep(tableTransitionExpiration) - // We're now both beyond table's timestamp as well as a tableGC interval - validateTableDoesNotExist(t, tableName) - // Table should be renamed as _vt_DROP_... and then dropped! - validateAnyState(t, 0, schema.DropTableGCState, schema.TableDroppedGCState) + }) + + t.Run("validating rows evacuated", func(t *testing.T) { + // We're now both beyond table's timestamp as well as a tableGC interval + validateTableDoesNotExist(t, tableName) + // Table should be renamed as _vt_DROP_... and then dropped! 
+ validateAnyState(t, 0, schema.DropTableGCState, schema.TableDroppedGCState) + }) } func TestDrop(t *testing.T) { diff --git a/go/timer/suspendable_ticker.go b/go/timer/suspendable_ticker.go index 5257626b85f..f2694a5cab3 100644 --- a/go/timer/suspendable_ticker.go +++ b/go/timer/suspendable_ticker.go @@ -28,7 +28,7 @@ type SuspendableTicker struct { // C is user facing C chan time.Time - suspended int64 + suspended atomic.Bool } // NewSuspendableTicker creates a new suspendable ticker, indicating whether the ticker should start @@ -39,7 +39,7 @@ func NewSuspendableTicker(d time.Duration, initiallySuspended bool) *Suspendable C: make(chan time.Time), } if initiallySuspended { - s.suspended = 1 + s.suspended.Store(true) } go s.loop() return s @@ -48,12 +48,12 @@ func NewSuspendableTicker(d time.Duration, initiallySuspended bool) *Suspendable // Suspend stops sending time events on the channel C // time events sent during suspended time are lost func (s *SuspendableTicker) Suspend() { - atomic.StoreInt64(&s.suspended, 1) + s.suspended.Store(true) } // Resume re-enables time events on channel C func (s *SuspendableTicker) Resume() { - atomic.StoreInt64(&s.suspended, 0) + s.suspended.Store(false) } // Stop completely stops the timer, like time.Timer @@ -64,15 +64,23 @@ func (s *SuspendableTicker) Stop() { // TickNow generates a tick at this point in time. It may block // if nothing consumes the tick. func (s *SuspendableTicker) TickNow() { - if atomic.LoadInt64(&s.suspended) == 0 { + if !s.suspended.Load() { // not suspended s.C <- time.Now() } } +// TickAfter generates a tick after given duration has passed. +// It runs asynchronously and returns immediately. 
+func (s *SuspendableTicker) TickAfter(d time.Duration) { + time.AfterFunc(d, func() { + s.TickNow() + }) +} + func (s *SuspendableTicker) loop() { for t := range s.ticker.C { - if atomic.LoadInt64(&s.suspended) == 0 { + if !s.suspended.Load() { // not suspended s.C <- t } diff --git a/go/timer/suspendable_ticker_test.go b/go/timer/suspendable_ticker_test.go new file mode 100644 index 00000000000..64c468a0edc --- /dev/null +++ b/go/timer/suspendable_ticker_test.go @@ -0,0 +1,144 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package timer + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + fastTickerInterval = 10 * time.Millisecond +) + +func TestInitiallySuspended(t *testing.T) { + ctx := context.Background() + t.Run("true", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, true) + defer ticker.Stop() + select { + case <-ticker.C: + assert.Fail(t, "unexpected tick. Was supposed to be suspended") + case <-ctx.Done(): + return + } + }) + t.Run("false", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, false) + defer ticker.Stop() + select { + case <-ticker.C: + return + case <-ctx.Done(): + assert.Fail(t, "unexpected timeout. 
Expected tick") + } + }) +} + +func TestSuspendableTicker(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, false) + defer ticker.Stop() + + var ticks atomic.Int64 + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticks.Add(1) + } + } + }() + t.Run("ticks running", func(t *testing.T) { + time.Sleep(time.Second) + after := ticks.Load() + assert.Greater(t, after, int64(10)) // should be about 100 + }) + t.Run("ticks suspended", func(t *testing.T) { + ticker.Suspend() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Less(t, after-before, int64(10)) + }) + t.Run("ticks resumed", func(t *testing.T) { + ticker.Resume() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Greater(t, after-before, int64(10)) + }) + t.Run("ticker stopped", func(t *testing.T) { + ticker.Stop() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Less(t, after-before, int64(10)) + }) +} + +func TestSuspendableTickerTick(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ticker := NewSuspendableTicker(time.Hour, false) + defer ticker.Stop() + + var ticks atomic.Int64 + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticks.Add(1) + } + } + }() + t.Run("nothing going on", func(t *testing.T) { + time.Sleep(time.Second) + after := ticks.Load() + assert.Zero(t, after) + }) + t.Run("tick now", func(t *testing.T) { + before := ticks.Load() + ticker.TickNow() + time.Sleep(time.Second) + after := ticks.Load() + assert.Equal(t, int64(1), after-before) + }) + t.Run("tick after", func(t *testing.T) { + before := ticks.Load() + ticker.TickAfter(1 * time.Second) + time.Sleep(time.Second) + after := ticks.Load() + assert.Zero(t, after-before) + time.Sleep(3 * time.Second) + after = ticks.Load() + assert.Equal(t, 
int64(1), after-before) + }) +} diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index dca3186f7b3..2c78069e962 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -176,6 +176,7 @@ type Executor struct { ts *topo.Server lagThrottler *throttle.Throttler toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool) + requestGCChecksFunc func() tabletAlias *topodatapb.TabletAlias keyspace string @@ -251,6 +252,7 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top lagThrottler *throttle.Throttler, tabletTypeFunc func() topodatapb.TabletType, toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool), + requestGCChecksFunc func(), ) *Executor { // sanitize flags if maxConcurrentOnlineDDLs < 1 { @@ -268,6 +270,7 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top ts: ts, lagThrottler: lagThrottler, toggleBufferTableFunc: toggleBufferTableFunc, + requestGCChecksFunc: requestGCChecksFunc, ticks: timer.NewTimer(migrationCheckInterval), // Gracefully return an error if any caller tries to execute // a query before the executor has been fully opened. @@ -3932,6 +3935,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { if err == nil { // artifact was renamed away and is gone. There' no need to list it in `artifacts` column. 
e.clearSingleArtifact(ctx, uuid, artifactTable) + e.requestGCChecksFunc() } else { return vterrors.Wrapf(err, "in gcArtifacts() for %s", artifactTable) } @@ -4502,6 +4506,7 @@ func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *s return nil, err } log.Infof("CleanupMigration: migration %s marked as ready to clean up", uuid) + defer e.triggerNextCheckInterval() return rs, nil } diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 8658d7c3a3b..d8d12611e43 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -49,9 +49,12 @@ const ( ) var ( - checkInterval = 1 * time.Hour - purgeReentranceInterval = 1 * time.Minute - gcLifecycle = "hold,purge,evac,drop" + checkInterval = 1 * time.Hour + purgeReentranceInterval = 1 * time.Minute + nextPurgeReentry = 1 * time.Second + checkTablesReentryMinInterval = 10 * time.Second + NextChecksIntervals = []time.Duration{time.Second, checkTablesReentryMinInterval + 5*time.Second} + gcLifecycle = "hold,purge,evac,drop" ) func init() { @@ -69,11 +72,10 @@ func registerGCFlags(fs *pflag.FlagSet) { } var ( - sqlPurgeTable = `delete from %a limit 50` - sqlShowVtTables = `show full tables like '\_vt\_%'` - sqlDropTable = "drop table if exists `%a`" - sqlDropView = "drop view if exists `%a`" - purgeReentranceFlag int64 + sqlPurgeTable = `delete from %a limit 50` + sqlShowVtTables = `show full tables like '\_vt\_%'` + sqlDropTable = "drop table if exists `%a`" + sqlDropView = "drop view if exists `%a`" ) type gcTable struct { @@ -105,6 +107,10 @@ type TableGC struct { isOpen int64 cancelOperation context.CancelFunc + purgeReentranceFlag atomic.Int64 + readReentranceFlag atomic.Int64 + checkRequestChan chan bool + throttlerClient *throttle.Client env tabletenv.Env @@ -143,7 +149,8 @@ func NewTableGC(env tabletenv.Env, ts *topo.Server, lagThrottler *throttle.Throt IdleTimeoutSeconds: 
env.Config().OltpReadPool.IdleTimeoutSeconds, }), - purgingTables: map[string]bool{}, + purgingTables: map[string]bool{}, + checkRequestChan: make(chan bool), } return collector @@ -226,6 +233,16 @@ func (collector *TableGC) Close() { log.Infof("TableGC - finished execution of Close") } +// RequestChecks requests that the GC will do a table check right away, as well as in a few seconds. +// Calling this function is useful to modules that are performing operations that affect GC tables. Those modules +// _know_ that changes have been made, and now have a way to tell TableGC: "please take a look asap rather +// than in the next hour". +func (collector *TableGC) RequestChecks() { + for _, d := range NextChecksIntervals { + time.AfterFunc(d, func() { collector.checkRequestChan <- true }) + } +} + // operate is the main entry point for the table garbage collector operation and logic. func (collector *TableGC) operate(ctx context.Context) { @@ -254,55 +271,47 @@ func (collector *TableGC) operate(ctx context.Context) { case <-ctx.Done(): log.Info("TableGC: done operating") return + case <-collector.checkRequestChan: + // Got a request to check tables. Probably some event took place and we will + // find something new to do. 
+ go tableCheckTicker.TickNow() case <-tableCheckTicker.C: - { - log.Info("TableGC: tableCheckTicker") - if gcTables, err := collector.readTables(ctx); err != nil { - log.Errorf("TableGC: error while reading tables: %+v", err) - } else { - _ = collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan) - } + log.Info("TableGC: tableCheckTicker") + if err := collector.readAndCheckTables(ctx, dropTablesChan, transitionRequestsChan); err != nil { + log.Error(err) } case <-purgeReentranceTicker.C: - { - // relay the request - go func() { purgeRequestsChan <- true }() - } + // relay the request + go func() { purgeRequestsChan <- true }() case <-purgeRequestsChan: - { - go func() { - tableName, err := collector.purge(ctx) - if err != nil { - log.Errorf("TableGC: error purging table %s: %+v", tableName, err) - return - } - if tableName == "" { - // No table purged (or at least not to completion) - // Either because there _is_ nothing to purge, or because PURGE isn't a handled state - return - } - // The table has been purged! Let's move the table into the next phase: - _, _, uuid, _, _ := schema.AnalyzeGCTableName(tableName) - collector.submitTransitionRequest(ctx, transitionRequestsChan, schema.PurgeTableGCState, tableName, true, uuid) - collector.removePurgingTable(tableName) - // Chances are, there's more tables waiting to be purged. 
Let's speed things by - // requesting another purge, instead of waiting a full purgeReentranceInterval cycle - time.AfterFunc(time.Second, func() { purgeRequestsChan <- true }) - }() - } - case dropTable := <-dropTablesChan: - { - log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) - if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { - log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) + go func() { + tableName, err := collector.purge(ctx) + if err != nil { + log.Errorf("TableGC: error purging table %s: %+v", tableName, err) + return + } + if tableName == "" { + // No table purged (or at least not to completion) + // Either because there _is_ nothing to purge, or because PURGE isn't a handled state + return } + // The table has been purged! Let's move the table into the next phase: + _, _, uuid, _, _ := schema.AnalyzeGCTableName(tableName) + collector.submitTransitionRequest(ctx, transitionRequestsChan, schema.PurgeTableGCState, tableName, true, uuid) + collector.removePurgingTable(tableName) + // Chances are, there's more tables waiting to be purged. 
Let's speed things by + // requesting another purge, instead of waiting a full purgeReentranceInterval cycle + purgeReentranceTicker.TickAfter(nextPurgeReentry) + }() + case dropTable := <-dropTablesChan: + log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) + if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { + log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) } case transition := <-transitionRequestsChan: - { - log.Info("TableGC: transitionRequestsChan, transition=%v", transition) - if err := collector.transitionTable(ctx, transition); err != nil { - log.Errorf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err) - } + log.Info("TableGC: transitionRequestsChan, transition=%v", transition) + if err := collector.transitionTable(ctx, transition); err != nil { + log.Errorf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err) } } } @@ -378,6 +387,32 @@ func (collector *TableGC) shouldTransitionTable(tableName string) (shouldTransit return true, state, uuid, nil } +// readAndCheckTables is the routine check for which GC tables exist, and which of those need to transition +// into the next state. The function is non-reentrant, and poses a minimal duration between any two executions. 
+func (collector *TableGC) readAndCheckTables( + ctx context.Context, + dropTablesChan chan<- *gcTable, + transitionRequestsChan chan<- *transitionRequest, +) (err error) { + if !collector.readReentranceFlag.CompareAndSwap(0, 1) { + // An instance of this function is already running + return nil + } + defer time.AfterFunc(checkTablesReentryMinInterval, func() { + collector.readReentranceFlag.Store(0) + }) + + log.Info("TableGC: readAndCheckTables") + gcTables, err := collector.readTables(ctx) + if err != nil { + return fmt.Errorf("TableGC: error while reading tables: %+v", err) + } + if err := collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan); err != nil { + return err + } + return nil +} + // readTables reads the list of _vt_% tables from the database func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, err error) { log.Infof("TableGC: read tables") @@ -457,12 +492,11 @@ func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, // This function is non-reentrant: there's only one instance of this function running at any given time. // A timer keeps calling this function, so if it bails out (e.g. on error) it will later resume work func (collector *TableGC) purge(ctx context.Context) (tableName string, err error) { - if atomic.CompareAndSwapInt64(&purgeReentranceFlag, 0, 1) { - defer atomic.StoreInt64(&purgeReentranceFlag, 0) - } else { + if !collector.purgeReentranceFlag.CompareAndSwap(0, 1) { // An instance of this function is already running return "", nil } + defer collector.purgeReentranceFlag.Store(0) tableName, found := collector.nextTableToPurge() if !found { @@ -598,6 +632,9 @@ func (collector *TableGC) transitionTable(ctx context.Context, transition *trans return err } log.Infof("TableGC: renamed table: %s", transition.fromTableName) + // Since the table has transitioned, there is a potential for more work on this table or on other tables, + // let's kick a check request. 
+ collector.RequestChecks() return nil } diff --git a/go/vt/vttablet/tabletserver/gc/tablegc_test.go b/go/vt/vttablet/tabletserver/gc/tablegc_test.go index 446f6e6ff85..12ee5e2a28b 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc_test.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc_test.go @@ -60,7 +60,8 @@ func TestNextTableToPurge(t *testing.T) { } for _, ts := range tt { collector := &TableGC{ - purgingTables: make(map[string]bool), + purgingTables: make(map[string]bool), + checkRequestChan: make(chan bool), } for _, table := range ts.tables { collector.purgingTables[table] = true @@ -256,8 +257,9 @@ func TestShouldTransitionTable(t *testing.T) { func TestCheckTables(t *testing.T) { collector := &TableGC{ - isOpen: 0, - purgingTables: map[string]bool{}, + isOpen: 0, + purgingTables: map[string]bool{}, + checkRequestChan: make(chan bool), } var err error collector.lifecycleStates, err = schema.ParseGCLifecycle("hold,purge,evac,drop") diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 2e1831e0acd..3b0ac598ad0 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -187,8 +187,8 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC tsv.te = NewTxEngine(tsv) tsv.messager = messager.NewEngine(tsv, tsv.se, tsv.vstreamer) - tsv.onlineDDLExecutor = onlineddl.NewExecutor(tsv, alias, topoServer, tsv.lagThrottler, tabletTypeFunc, tsv.onlineDDLExecutorToggleTableBuffer) tsv.tableGC = gc.NewTableGC(tsv, topoServer, tsv.lagThrottler) + tsv.onlineDDLExecutor = onlineddl.NewExecutor(tsv, alias, topoServer, tsv.lagThrottler, tabletTypeFunc, tsv.onlineDDLExecutorToggleTableBuffer, tsv.tableGC.RequestChecks) tsv.sm = &stateManager{ statelessql: tsv.statelessql, From 248cce3672a679aa7324b0d4bc87280c8bb4c7f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:27:36 
-0600 Subject: [PATCH 30/33] Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /web/vtadmin (#14654) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/vtadmin/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index 94044b24a27..ad72ae9dc2f 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -72,9 +72,9 @@ } }, "node_modules/@adobe/css-tools": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", - "integrity": "sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.2.tgz", + "integrity": "sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw==", "dev": true }, "node_modules/@ampproject/remapping": { @@ -17600,9 +17600,9 @@ }, "dependencies": { "@adobe/css-tools": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", - "integrity": "sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.2.tgz", + "integrity": "sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw==", "dev": true }, "@ampproject/remapping": { From 8520049a1b1872f519ba1a1ae0ad2f3c006b6246 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Mon, 4 Dec 2023 23:11:44 +0530 Subject: [PATCH 31/33] Add summary changes to indicate MySQL 5.7 is EOL and Vitess is dropping support for it in v19 (#14663) --- changelog/19.0/19.0.0/summary.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/changelog/19.0/19.0.0/summary.md 
b/changelog/19.0/19.0.0/summary.md index 5580bc6e6f5..446b4b32bc2 100644 --- a/changelog/19.0/19.0.0/summary.md +++ b/changelog/19.0/19.0.0/summary.md @@ -3,6 +3,7 @@ ### Table of Contents - **[Major Changes](#major-changes)** + - **[Dropping Support for MySQL 5.7](#drop-support-mysql57)** - **[Deprecations and Deletions](#deprecations-and-deletions)** - [VTTablet Flags](#vttablet-flags) - **[Docker](#docker)** @@ -14,6 +15,13 @@ ## Major Changes +### Dropping Support for MySQL 5.7 + +Oracle has marked MySQL 5.7 end of life as of October 2023. Vitess is also dropping support for MySQL 5.7 from v19 onwards. Users are advised to upgrade to MySQL 8.0 while on v18 version of Vitess before +upgrading to v19. + +Vitess will however, continue to support importing from MySQL 5.7 into Vitess even in v19. + ### Deprecations and Deletions - The `MYSQL_FLAVOR` environment variable is now removed from all Docker Images. From 68207425b5e2cfb01d907adc7dba1f05839848e0 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:55:18 +0530 Subject: [PATCH 32/33] Fix Panic in PRS due to a missing nil check (#14656) Signed-off-by: Manan Gupta --- go/vt/vtctl/grpcvtctldserver/server.go | 2 +- go/vt/vtctl/grpcvtctldserver/server_test.go | 75 +++++++++++++++++++-- 2 files changed, 70 insertions(+), 7 deletions(-) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index caa99d5e8c8..e54ee0fa96c 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -2752,7 +2752,7 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap resp.Keyspace = ev.ShardInfo.Keyspace() resp.Shard = ev.ShardInfo.ShardName() - if !topoproto.TabletAliasIsZero(ev.NewPrimary.Alias) { + if ev.NewPrimary != nil && !topoproto.TabletAliasIsZero(ev.NewPrimary.Alias) { resp.PromotedPrimary = ev.NewPrimary.Alias } } diff --git 
a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index d597ec49502..c04114d46cd 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -7439,7 +7439,7 @@ func TestPlannedReparentShard(t *testing.T) { req *vtctldatapb.PlannedReparentShardRequest expected *vtctldatapb.PlannedReparentShardResponse expectEventsToOccur bool - shouldErr bool + expectedErr string }{ { name: "successful reparent", @@ -7554,7 +7554,6 @@ func TestPlannedReparentShard(t *testing.T) { }, }, expectEventsToOccur: true, - shouldErr: false, }, { // Note: this is testing the error-handling done in @@ -7570,7 +7569,7 @@ func TestPlannedReparentShard(t *testing.T) { Shard: "-", }, expectEventsToOccur: false, - shouldErr: true, + expectedErr: "node doesn't exist: keyspaces/testkeyspace/shards/-", }, { name: "invalid WaitReplicasTimeout", @@ -7580,7 +7579,71 @@ func TestPlannedReparentShard(t *testing.T) { Nanos: 1, }, }, - shouldErr: true, + expectedErr: "duration: seconds:-1 nanos:1 is out of range for time.Duration", + }, + { + name: "tablet unreachable", + ts: memorytopo.NewServer(ctx, "zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + PrimaryTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Error: fmt.Errorf("primary status failed"), + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: protoutil.DurationToProto(time.Millisecond * 10), + }, + expectEventsToOccur: true, + expectedErr: "primary status failed", }, } @@ -7610,8 +7673,8 @@ func TestPlannedReparentShard(t *testing.T) { testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") }() - if tt.shouldErr { - assert.Error(t, err) + if tt.expectedErr != "" { + assert.EqualError(t, err, tt.expectedErr) return } From 36d8d3657f16515c5806c3cb1a2b4ab6d0443327 Mon Sep 17 00:00:00 2001 From: Manan Gupta <35839558+GuptaManan100@users.noreply.github.com> Date: Tue, 5 Dec 2023 12:15:39 +0530 Subject: [PATCH 33/33] Take replication lag into account while selecting primary candidate (#14634) Signed-off-by: Manan Gupta --- changelog/19.0/19.0.0/summary.md | 12 + go/cmd/vtctldclient/command/reparents.go | 19 +- go/flags/endtoend/vtorc.txt | 1 + go/vt/proto/vtctldata/vtctldata.pb.go | 1875 +++++++++-------- go/vt/proto/vtctldata/vtctldata_vtproto.pb.go | 61 +- go/vt/vtctl/grpcvtctldserver/server.go | 5 + go/vt/vtctl/reparent.go | 3 +- .../vtctl/reparentutil/planned_reparenter.go | 3 +- go/vt/vtctl/reparentutil/util.go | 17 +- go/vt/vtctl/reparentutil/util_test.go | 144 +- go/vt/vtorc/config/config.go | 4 + go/vt/vtorc/logic/topology_recovery.go | 1 + go/vt/wrangler/reparent.go | 8 +- proto/vtctldata.proto | 4 + web/vtadmin/src/proto/vtadmin.d.ts | 6 + web/vtadmin/src/proto/vtadmin.js | 28 + 16 files changed, 1231 insertions(+), 960 deletions(-) diff --git 
a/changelog/19.0/19.0.0/summary.md b/changelog/19.0/19.0.0/summary.md index 446b4b32bc2..af9c83399a3 100644 --- a/changelog/19.0/19.0.0/summary.md +++ b/changelog/19.0/19.0.0/summary.md @@ -12,6 +12,8 @@ - [`FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable](#fk-checks-vitess-aware) - **[Query Compatibility](#query-compatibility)** - [`SHOW VSCHEMA KEYSPACES` Query](#show-vschema-keyspaces) + - **[Planned Reparent Shard](#planned-reparent-shard)** + - [`--tolerable-replication-lag` Sub-flag](#tolerable-repl-lag) ## Major Changes @@ -66,3 +68,13 @@ mysql> show vschema keyspaces; +----------+---------+-------------+---------+ 2 rows in set (0.01 sec) ``` + +### Planned Reparent Shard + +#### `--tolerable-replication-lag` Sub-flag + +A new sub-flag `--tolerable-replication-lag` has been added to the command `PlannedReparentShard` that allows users to specify the amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. +This feature is opt-in and not specifying this sub-flag makes Vitess ignore the replication lag entirely. + +A new flag in VTOrc with the same name has been added to control the behaviour of the PlannedReparentShard calls that VTOrc issues. 
+ diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index 5c83016701a..17b87eaba4f 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ b/go/cmd/vtctldclient/command/reparents.go @@ -183,9 +183,10 @@ func commandInitShardPrimary(cmd *cobra.Command, args []string) error { } var plannedReparentShardOptions = struct { - NewPrimaryAliasStr string - AvoidPrimaryAliasStr string - WaitReplicasTimeout time.Duration + NewPrimaryAliasStr string + AvoidPrimaryAliasStr string + WaitReplicasTimeout time.Duration + TolerableReplicationLag time.Duration }{} func commandPlannedReparentShard(cmd *cobra.Command, args []string) error { @@ -216,11 +217,12 @@ func commandPlannedReparentShard(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) resp, err := client.PlannedReparentShard(commandCtx, &vtctldatapb.PlannedReparentShardRequest{ - Keyspace: keyspace, - Shard: shard, - NewPrimary: newPrimaryAlias, - AvoidPrimary: avoidPrimaryAlias, - WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout), + Keyspace: keyspace, + Shard: shard, + NewPrimary: newPrimaryAlias, + AvoidPrimary: avoidPrimaryAlias, + WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout), + TolerableReplicationLag: protoutil.DurationToProto(plannedReparentShardOptions.TolerableReplicationLag), }) if err != nil { return err @@ -292,6 +294,7 @@ func init() { Root.AddCommand(InitShardPrimary) PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up on replication both before and after reparenting.") + PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.TolerableReplicationLag, "tolerable-replication-lag", 0, "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice 
of a new primary.") PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary.") PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.AvoidPrimaryAliasStr, "avoid-primary", "", "Alias of a tablet that should not be the primary; i.e. \"reparent to any other tablet if this one is the primary\".") Root.AddCommand(PlannedReparentShard) diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index 0bbe8ef7469..8ea30e2ff10 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -86,6 +86,7 @@ Flags: --tablet_manager_grpc_key string the key to use to connect --tablet_manager_grpc_server_name string the server name to use to validate server certificate --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") + --tolerable-replication-lag duration Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s) --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index fafa6d703cc..f7252e2d8ec 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -8704,6 +8704,10 @@ type PlannedReparentShardRequest struct { // WaitReplicasTimeout time to catch up before the reparent, and an additional // WaitReplicasTimeout time to catch up after the reparent. 
WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // TolerableReplicationLag is the amount of replication lag that is considered + // acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. + // A value of 0 indicates that Vitess shouldn't consider the replication lag at all. + TolerableReplicationLag *vttime.Duration `protobuf:"bytes,6,opt,name=tolerable_replication_lag,json=tolerableReplicationLag,proto3" json:"tolerable_replication_lag,omitempty"` } func (x *PlannedReparentShardRequest) Reset() { @@ -8773,6 +8777,13 @@ func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration return nil } +func (x *PlannedReparentShardRequest) GetTolerableReplicationLag() *vttime.Duration { + if x != nil { + return x.TolerableReplicationLag + } + return nil +} + type PlannedReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -16795,7 +16806,7 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd7, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, @@ -16812,591 +16823,531 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 
0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, - 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x19, 0x74, 0x6f, + 0x6c, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x17, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 
0x0a, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, + 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, + 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, + 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, + 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, + 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, + 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, + 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 
0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, + 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, - 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 
0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, - 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, - 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 
0x0a, - 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, - 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2c, 0x0a, 0x12, 0x69, 0x73, 
0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, - 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, - 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, - 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, - 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 
0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 
0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, - 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, - 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, - 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, 0x04, 0x0a, 0x14, 0x52, 0x65, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, - 0x63, 0x6f, 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x6b, 0x69, 0x70, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, - 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, - 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, - 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x18, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, - 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, - 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, - 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 
0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, - 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, - 0x4d, 0x0a, 0x1b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdd, - 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x75, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x40, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, - 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, - 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 
0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, - 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, - 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, - 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 
0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, - 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 
0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, - 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, - 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, + 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 
0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, 0x04, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x12, 0x14, 
0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, + 0x64, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 
0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, + 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x18, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, + 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, + 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xad, 0x01, + 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 
0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, + 0x1b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdd, 0x01, 0x0a, + 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, + 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 
0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x15, + 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, + 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 
0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, + 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, + 0x74, 
0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, + 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 
0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, - 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, + 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, + 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, - 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, - 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 
0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, - 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, - 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, - 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 
0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, - 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, - 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 
0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, + 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, + 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 
0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, + 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 
0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, + 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, + 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, + 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, + 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, + 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, + 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 
0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, + 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, + 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 
0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, + 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, + 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, - 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 
0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, + 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, - 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 
0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, + 
0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, + 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, @@ -17405,272 +17356,337 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x12, 0x56, 
0x44, 0x69, 0x66, - 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x53, 0x65, 
0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x12, 0x55, 0x0a, 0x1e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x09, 0x6f, 0x6e, - 0x6c, 0x79, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, - 0x6e, 0x6c, 0x79, 0x50, 0x4b, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x77, 0x61, 0x69, 0x74, 0x18, 0x0f, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x61, - 0x69, 0x74, 0x12, 0x42, 0x0a, 0x14, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x61, 0x69, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, - 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x6f, 0x77, 0x73, 0x22, 0x29, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, - 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, - 0x6b, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 
0x72, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, 0x15, 0x0a, 0x13, - 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x61, 0x72, 0x67, 0x22, 0xd7, 0x01, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, - 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x1a, 0x64, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, - 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, - 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x56, - 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x9a, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, - 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, + 0x6c, 
0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, + 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x98, 0x01, + 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 
0x65, 0x5f, 0x76, + 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x60, + 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x77, 0x6f, 0x72, 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x55, 0x0a, 0x1e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x09, 0x6f, 0x6e, 0x6c, 0x79, + 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, + 0x79, 0x50, 0x4b, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, + 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, + 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x77, 0x61, 0x69, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x61, 0x69, 0x74, + 0x12, 0x42, 0x0a, 0x14, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x10, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x77, 0x61, 0x69, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x33, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6d, + 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, + 0x77, 0x73, 0x22, 0x29, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, 0x6b, 0x0a, + 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, + 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x6d, 0x0a, 0x12, 
0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, + 0x72, 0x67, 0x22, 0xd7, 0x01, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x1a, 0x64, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x10, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, + 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, + 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x22, 0x4f, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, + 0x4f, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x22, 0xe6, 0x07, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, + 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x58, 
0x0a, 0x0d, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, + 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, + 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xe8, 0x01, 0x0a, 0x0e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x62, + 0x79, 0x74, 0x65, 0x73, 
0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x54, 0x61, 0x62, 
0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x44, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x03, 0x0a, 0x1c, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x22, 0xe6, 0x07, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, - 0x74, 
0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, - 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x58, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x66, - 0x66, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xe8, 0x01, - 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, - 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 
0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, - 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 
0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, - 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x44, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x52, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x03, 0x0a, 0x1c, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, - 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, - 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, - 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, - 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, - 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 
0x22, - 0x90, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, - 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 
0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, - 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, - 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, - 0x10, 0x02, 0x2a, 0x38, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x4f, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 
0x77, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x76, + 0x65, 0x72, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, + 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, + 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, + 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x90, 0x01, + 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, + 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x56, + 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x10, 0x02, + 0x2a, 0x38, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, + 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, + 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, + 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 
0x6c, + 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -18107,106 +18123,107 @@ var file_vtctldata_proto_depIdxs = []int32{ 275, // 109: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias 275, // 110: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias 276, // 111: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 275, // 112: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 271, // 113: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event - 275, // 114: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 115: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 271, // 116: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event - 271, // 117: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 275, // 118: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias - 275, // 119: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias - 283, // 120: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 121: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 275, // 122: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 274, // 123: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 274, // 124: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time - 275, // 125: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 271, // 126: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 255, // 127: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> 
vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry - 275, // 128: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias - 273, // 129: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace - 283, // 130: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType - 273, // 131: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace - 273, // 132: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace - 277, // 133: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard - 283, // 134: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType - 277, // 135: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard - 275, // 136: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 137: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias - 296, // 138: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError - 256, // 139: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - 257, // 140: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - 275, // 141: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 142: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 276, // 143: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration - 297, // 144: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange - 277, // 145: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard - 277, // 146: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard - 275, // 147: 
vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 148: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 149: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias - 275, // 150: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias - 275, // 151: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias - 278, // 152: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 278, // 153: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 298, // 154: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias - 298, // 155: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias - 258, // 156: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry - 259, // 157: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - 260, // 158: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - 261, // 159: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - 262, // 160: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - 283, // 161: vtctldata.VDiffCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 162: vtctldata.VDiffCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 276, // 163: vtctldata.VDiffCreateRequest.filtered_replication_wait_time:type_name -> vttime.Duration - 276, // 164: vtctldata.VDiffCreateRequest.wait_update_interval:type_name -> vttime.Duration - 263, // 165: 
vtctldata.VDiffShowResponse.tablet_responses:type_name -> vtctldata.VDiffShowResponse.TabletResponsesEntry - 264, // 166: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo - 268, // 167: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry - 269, // 168: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry - 283, // 169: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType - 276, // 170: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration - 276, // 171: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration - 299, // 172: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest - 270, // 173: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo - 238, // 174: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream - 239, // 175: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream - 300, // 176: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl - 275, // 177: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias - 301, // 178: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource - 274, // 179: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time - 274, // 180: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time - 240, // 181: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState - 241, // 182: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log - 242, // 183: vtctldata.Workflow.Stream.throttler_status:type_name -> vtctldata.Workflow.Stream.ThrottlerStatus - 274, // 184: 
vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time - 274, // 185: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time - 274, // 186: vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled:type_name -> vttime.Time - 10, // 187: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard - 298, // 188: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias - 250, // 189: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList - 302, // 190: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace - 295, // 191: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema - 275, // 192: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 303, // 193: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status - 284, // 194: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet - 207, // 195: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse - 211, // 196: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 197: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 198: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 199: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 304, // 200: vtctldata.VDiffShowResponse.TabletResponsesEntry.value:type_name -> tabletmanagerdata.VDiffResponse - 275, // 201: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 275, // 202: 
vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias - 266, // 203: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState - 265, // 204: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState - 267, // 205: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams - 275, // 206: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 207, // [207:207] is the sub-list for method output_type - 207, // [207:207] is the sub-list for method input_type - 207, // [207:207] is the sub-list for extension type_name - 207, // [207:207] is the sub-list for extension extendee - 0, // [0:207] is the sub-list for field type_name + 276, // 112: vtctldata.PlannedReparentShardRequest.tolerable_replication_lag:type_name -> vttime.Duration + 275, // 113: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 271, // 114: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event + 275, // 115: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 116: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 271, // 117: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event + 271, // 118: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 275, // 119: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias + 275, // 120: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias + 283, // 121: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 122: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 275, // 123: 
vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 274, // 124: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 274, // 125: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 275, // 126: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 271, // 127: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 255, // 128: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + 275, // 129: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias + 273, // 130: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace + 283, // 131: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType + 273, // 132: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace + 273, // 133: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace + 277, // 134: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard + 283, // 135: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType + 277, // 136: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard + 275, // 137: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 138: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias + 296, // 139: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError + 256, // 140: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + 257, // 141: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + 275, // 142: 
vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 143: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 276, // 144: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration + 297, // 145: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange + 277, // 146: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard + 277, // 147: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard + 275, // 148: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 149: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 150: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias + 275, // 151: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias + 275, // 152: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias + 278, // 153: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 278, // 154: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 298, // 155: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias + 298, // 156: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias + 258, // 157: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry + 259, // 158: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + 260, // 159: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + 261, // 160: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + 262, // 161: 
vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + 283, // 162: vtctldata.VDiffCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 163: vtctldata.VDiffCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 276, // 164: vtctldata.VDiffCreateRequest.filtered_replication_wait_time:type_name -> vttime.Duration + 276, // 165: vtctldata.VDiffCreateRequest.wait_update_interval:type_name -> vttime.Duration + 263, // 166: vtctldata.VDiffShowResponse.tablet_responses:type_name -> vtctldata.VDiffShowResponse.TabletResponsesEntry + 264, // 167: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo + 268, // 168: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry + 269, // 169: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry + 283, // 170: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType + 276, // 171: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration + 276, // 172: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration + 299, // 173: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 270, // 174: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo + 238, // 175: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream + 239, // 176: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream + 300, // 177: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl + 275, // 178: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias + 301, // 179: 
vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource + 274, // 180: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time + 274, // 181: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time + 240, // 182: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState + 241, // 183: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log + 242, // 184: vtctldata.Workflow.Stream.throttler_status:type_name -> vtctldata.Workflow.Stream.ThrottlerStatus + 274, // 185: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time + 274, // 186: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time + 274, // 187: vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled:type_name -> vttime.Time + 10, // 188: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard + 298, // 189: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias + 250, // 190: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList + 302, // 191: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace + 295, // 192: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema + 275, // 193: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 303, // 194: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status + 284, // 195: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet + 207, // 196: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse + 211, // 197: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 198: 
vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 199: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 200: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 304, // 201: vtctldata.VDiffShowResponse.TabletResponsesEntry.value:type_name -> tabletmanagerdata.VDiffResponse + 275, // 202: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 275, // 203: vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias + 266, // 204: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState + 265, // 205: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState + 267, // 206: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams + 275, // 207: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 208, // [208:208] is the sub-list for method output_type + 208, // [208:208] is the sub-list for method input_type + 208, // [208:208] is the sub-list for extension type_name + 208, // [208:208] is the sub-list for extension extendee + 0, // [0:208] is the sub-list for field type_name } func init() { file_vtctldata_proto_init() } diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index 2e589fe06e1..5c8ff40f8f2 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ -3110,11 +3110,12 @@ func (m *PlannedReparentShardRequest) CloneVT() *PlannedReparentShardRequest { return (*PlannedReparentShardRequest)(nil) } r := &PlannedReparentShardRequest{ - Keyspace: m.Keyspace, - Shard: m.Shard, - 
NewPrimary: m.NewPrimary.CloneVT(), - AvoidPrimary: m.AvoidPrimary.CloneVT(), - WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + AvoidPrimary: m.AvoidPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + TolerableReplicationLag: m.TolerableReplicationLag.CloneVT(), } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -13680,6 +13681,16 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.TolerableReplicationLag != nil { + size, err := m.TolerableReplicationLag.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } if m.WaitReplicasTimeout != nil { size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -22488,6 +22499,10 @@ func (m *PlannedReparentShardRequest) SizeVT() (n int) { l = m.WaitReplicasTimeout.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.TolerableReplicationLag != nil { + l = m.TolerableReplicationLag.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -45093,6 +45108,42 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerableReplicationLag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TolerableReplicationLag == 
nil { + m.TolerableReplicationLag = &vttime.Duration{} + } + if err := m.TolerableReplicationLag.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index e54ee0fa96c..ef0e9db341b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -2711,6 +2711,10 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap } else if !ok { waitReplicasTimeout = time.Second * 30 } + tolerableReplLag, _, err := protoutil.DurationFromProto(req.TolerableReplicationLag) + if err != nil { + return nil, err + } span.Annotate("keyspace", req.Keyspace) span.Annotate("shard", req.Shard) @@ -2740,6 +2744,7 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap AvoidPrimaryAlias: req.AvoidPrimary, NewPrimaryAlias: req.NewPrimary, WaitReplicasTimeout: waitReplicasTimeout, + TolerableReplLag: tolerableReplLag, }, ) diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 7ed0f6582b9..cd6cfe3fb6d 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -114,6 +114,7 @@ func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, sub } waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", topo.RemoteOperationTimeout, "time to wait for replicas to catch up on replication before and after reparenting") + tolerableReplicationLag := subFlags.Duration("tolerable-replication-lag", 0, "amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary") keyspaceShard := subFlags.String("keyspace_shard", "", "keyspace/shard of the shard that needs to be reparented") newPrimary := subFlags.String("new_primary", "", "alias of a tablet that should be the new primary") avoidTablet := 
subFlags.String("avoid_tablet", "", "alias of a tablet that should not be the primary, i.e. reparent to any other tablet if this one is the primary") @@ -149,7 +150,7 @@ func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, sub return err } } - return wr.PlannedReparentShard(ctx, keyspace, shard, newPrimaryAlias, avoidTabletAlias, *waitReplicasTimeout) + return wr.PlannedReparentShard(ctx, keyspace, shard, newPrimaryAlias, avoidTabletAlias, *waitReplicasTimeout, *tolerableReplicationLag) } func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 9fc933a8e35..4109386e8d1 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -60,6 +60,7 @@ type PlannedReparentOptions struct { NewPrimaryAlias *topodatapb.TabletAlias AvoidPrimaryAlias *topodatapb.TabletAlias WaitReplicasTimeout time.Duration + TolerableReplLag time.Duration // Private options managed internally. 
We use value-passing semantics to // set these options inside a PlannedReparent without leaking these details @@ -179,7 +180,7 @@ func (pr *PlannedReparenter) preflightChecks( event.DispatchUpdate(ev, "searching for primary candidate") - opts.NewPrimaryAlias, err = ChooseNewPrimary(ctx, pr.tmc, &ev.ShardInfo, tabletMap, opts.AvoidPrimaryAlias, opts.WaitReplicasTimeout, opts.durability, pr.logger) + opts.NewPrimaryAlias, err = ChooseNewPrimary(ctx, pr.tmc, &ev.ShardInfo, tabletMap, opts.AvoidPrimaryAlias, opts.WaitReplicasTimeout, opts.TolerableReplLag, opts.durability, pr.logger) if err != nil { return true, err } diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index ac57e1230d1..db260cc36b6 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -65,6 +65,7 @@ func ChooseNewPrimary( tabletMap map[string]*topo.TabletInfo, avoidPrimaryAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, + tolerableReplLag time.Duration, durability Durabler, // (TODO:@ajm188) it's a little gross we need to pass this, maybe embed in the context? logger logutil.Logger, @@ -97,10 +98,10 @@ func ChooseNewPrimary( tb := tablet.Tablet errorGroup.Go(func() error { // find and store the positions for the tablet - pos, err := findPositionForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) + pos, replLag, err := findPositionAndLagForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) mu.Lock() defer mu.Unlock() - if err == nil { + if err == nil && (tolerableReplLag == 0 || tolerableReplLag >= replLag) { validTablets = append(validTablets, tb) tabletPositions = append(tabletPositions, pos) } @@ -127,9 +128,9 @@ func ChooseNewPrimary( return validTablets[0].Alias, nil } -// findPositionForTablet processes the replication position for a single tablet and +// findPositionAndLagForTablet processes the replication position and lag for a single tablet and // returns it. It is safe to call from multiple goroutines. 
-func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, error) { +func findPositionAndLagForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, error) { logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) ctx, cancel := context.WithTimeout(ctx, waitTimeout) @@ -140,10 +141,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias)) - return replication.Position{}, nil + return replication.Position{}, 0, nil } logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) - return replication.Position{}, err + return replication.Position{}, 0, err } // Use the relay log position if available, otherwise use the executed GTID set (binary log position). @@ -154,10 +155,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge pos, err := replication.DecodePosition(positionString) if err != nil { logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err) - return replication.Position{}, err + return replication.Position{}, 0, err } - return pos, nil + return pos, time.Second * time.Duration(status.ReplicationLagSeconds), nil } // FindCurrentPrimary returns the current primary tablet of a shard, if any. 
The diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index 7b0d624590f..8e48bbb3303 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -72,22 +72,85 @@ func TestChooseNewPrimary(t *testing.T) { shardInfo *topo.ShardInfo tabletMap map[string]*topo.TabletInfo avoidPrimaryAlias *topodatapb.TabletAlias + tolerableReplLag time.Duration expected *topodatapb.TabletAlias shouldErr bool }{ { name: "found a replica", tmc: &chooseNewPrimaryTestTMClient{ - // zone1-101 is behind zone1-102 + // zone1-101 is behind zone1-102 and zone1-102 has a tolerable replication lag replicationStatuses: map[string]*replicationdatapb.Status{ "zone1-0000000101": { Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", }, "zone1-0000000102": { - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 20, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + shouldErr: false, + }, + { + name: "found a replica ignoring replica lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 
is behind zone1-102 and we don't care about the replication lag + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 230, }, }, }, + tolerableReplLag: 0, shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ PrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -133,6 +196,66 @@ func TestChooseNewPrimary(t *testing.T) { }, shouldErr: false, }, + { + name: "found a replica - ignore one with replication lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 232, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + shouldErr: false, + }, { name: "found a replica - more advanced relay log position", tmc: 
&chooseNewPrimaryTestTMClient{ @@ -443,7 +566,7 @@ func TestChooseNewPrimary(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - actual, err := ChooseNewPrimary(ctx, tt.tmc, tt.shardInfo, tt.tabletMap, tt.avoidPrimaryAlias, time.Millisecond*50, durability, logger) + actual, err := ChooseNewPrimary(ctx, tt.tmc, tt.shardInfo, tt.tabletMap, tt.avoidPrimaryAlias, time.Millisecond*50, tt.tolerableReplLag, durability, logger) if tt.shouldErr { assert.Error(t, err) return @@ -465,6 +588,7 @@ func TestFindPositionForTablet(t *testing.T) { tmc *testutil.TabletManagerClient tablet *topodatapb.Tablet expectedPosition string + expectedLag time.Duration expectedErr string }{ { @@ -476,7 +600,8 @@ func TestFindPositionForTablet(t *testing.T) { }{ "zone1-0000000100": { Position: &replicationdatapb.Status{ - Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + ReplicationLagSeconds: 201, }, }, }, @@ -487,6 +612,7 @@ func TestFindPositionForTablet(t *testing.T) { Uid: 100, }, }, + expectedLag: 201 * time.Second, expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", }, { name: "no replication status", @@ -506,6 +632,7 @@ func TestFindPositionForTablet(t *testing.T) { Uid: 100, }, }, + expectedLag: 0, expectedPosition: "", }, { name: "relay log", @@ -516,8 +643,9 @@ func TestFindPositionForTablet(t *testing.T) { }{ "zone1-0000000100": { Position: &replicationdatapb.Status{ - Position: "unused", - RelayLogPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + Position: "unused", + RelayLogPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + ReplicationLagSeconds: 291, }, }, }, @@ -528,6 +656,7 @@ func TestFindPositionForTablet(t *testing.T) { Uid: 100, }, }, + expectedLag: 291 * time.Second, expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", }, { name: "error in parsing position", @@ -555,7 +684,7 @@ func TestFindPositionForTablet(t 
*testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - pos, err := findPositionForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second) + pos, lag, err := findPositionAndLagForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second) if test.expectedErr != "" { require.EqualError(t, err, test.expectedErr) return @@ -563,6 +692,7 @@ func TestFindPositionForTablet(t *testing.T) { require.NoError(t, err) posString := replication.EncodePosition(pos) require.Equal(t, test.expectedPosition, posString) + require.Equal(t, test.expectedLag, lag) }) } } diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go index 03067d289ac..ba3c41ddc61 100644 --- a/go/vt/vtorc/config/config.go +++ b/go/vt/vtorc/config/config.go @@ -59,6 +59,7 @@ var ( recoveryPeriodBlockDuration = 30 * time.Second preventCrossCellFailover = false waitReplicasTimeout = 30 * time.Second + tolerableReplicationLag = 0 * time.Second topoInformationRefreshDuration = 15 * time.Second recoveryPollDuration = 1 * time.Second ersEnabled = true @@ -78,6 +79,7 @@ func RegisterFlags(fs *pflag.FlagSet) { fs.DurationVar(&recoveryPeriodBlockDuration, "recovery-period-block-duration", recoveryPeriodBlockDuration, "Duration for which a new recovery is blocked on an instance after running a recovery") fs.BoolVar(&preventCrossCellFailover, "prevent-cross-cell-failover", preventCrossCellFailover, "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover") fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs") + fs.DurationVar(&tolerableReplicationLag, "tolerable-replication-lag", tolerableReplicationLag, "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS") fs.DurationVar(&topoInformationRefreshDuration, 
"topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server") fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery") fs.BoolVar(&ersEnabled, "allow-emergency-reparent", ersEnabled, "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary") @@ -100,6 +102,7 @@ type Configuration struct { RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, vtorc will do all it can to only fail over within same DC, or else not fail over at all. WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS. + TolerableReplicationLagSeconds int // Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS. TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server. 
RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs } @@ -129,6 +132,7 @@ func UpdateConfigValuesFromFlags() { Config.RecoveryPeriodBlockSeconds = int(recoveryPeriodBlockDuration / time.Second) Config.PreventCrossDataCenterPrimaryFailover = preventCrossCellFailover Config.WaitReplicasTimeoutSeconds = int(waitReplicasTimeout / time.Second) + Config.TolerableReplicationLagSeconds = int(tolerableReplicationLag / time.Second) Config.TopoInformationRefreshSeconds = int(topoInformationRefreshDuration / time.Second) Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second) } diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index cf99e34e426..e5168fea541 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -852,6 +852,7 @@ func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysi analyzedTablet.Shard, reparentutil.PlannedReparentOptions{ WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second, + TolerableReplLag: time.Duration(config.Config.TolerableReplicationLagSeconds) * time.Second, }, ) diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index fbeec55cbbc..dbad6b2ee29 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -76,7 +76,12 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string // PlannedReparentShard will make the provided tablet the primary for the shard, // when both the current and new primary are reachable and in good shape. 
-func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias, avoidTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration) (err error) { +func (wr *Wrangler) PlannedReparentShard( + ctx context.Context, + keyspace, shard string, + primaryElectTabletAlias, avoidTabletAlias *topodatapb.TabletAlias, + waitReplicasTimeout, tolerableReplicationLag time.Duration, +) (err error) { _, err = reparentutil.NewPlannedReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, @@ -85,6 +90,7 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st AvoidPrimaryAlias: avoidTabletAlias, NewPrimaryAlias: primaryElectTabletAlias, WaitReplicasTimeout: waitReplicasTimeout, + TolerableReplLag: tolerableReplicationLag, }, ) diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index b84f87c9037..c16f9b5d0ea 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -1211,6 +1211,10 @@ message PlannedReparentShardRequest { // WaitReplicasTimeout time to catch up before the reparent, and an additional // WaitReplicasTimeout time to catch up after the reparent. vttime.Duration wait_replicas_timeout = 5; + // TolerableReplicationLag is the amount of replication lag that is considered + // acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. + // A value of 0 indicates that Vitess shouldn't consider the replication lag at all. 
+ vttime.Duration tolerable_replication_lag = 6; } message PlannedReparentShardResponse { diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 11489a0acc2..d19221fe676 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -57419,6 +57419,9 @@ export namespace vtctldata { /** PlannedReparentShardRequest wait_replicas_timeout */ wait_replicas_timeout?: (vttime.IDuration|null); + + /** PlannedReparentShardRequest tolerable_replication_lag */ + tolerable_replication_lag?: (vttime.IDuration|null); } /** Represents a PlannedReparentShardRequest. */ @@ -57445,6 +57448,9 @@ export namespace vtctldata { /** PlannedReparentShardRequest wait_replicas_timeout. */ public wait_replicas_timeout?: (vttime.IDuration|null); + /** PlannedReparentShardRequest tolerable_replication_lag. */ + public tolerable_replication_lag?: (vttime.IDuration|null); + /** * Creates a new PlannedReparentShardRequest instance using the specified properties. * @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 71d67a61ed1..7de0fde463a 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -140320,6 +140320,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {topodata.ITabletAlias|null} [new_primary] PlannedReparentShardRequest new_primary * @property {topodata.ITabletAlias|null} [avoid_primary] PlannedReparentShardRequest avoid_primary * @property {vttime.IDuration|null} [wait_replicas_timeout] PlannedReparentShardRequest wait_replicas_timeout + * @property {vttime.IDuration|null} [tolerable_replication_lag] PlannedReparentShardRequest tolerable_replication_lag */ /** @@ -140377,6 +140378,14 @@ export const vtctldata = $root.vtctldata = (() => { */ PlannedReparentShardRequest.prototype.wait_replicas_timeout = null; + /** + * PlannedReparentShardRequest tolerable_replication_lag. 
+ * @member {vttime.IDuration|null|undefined} tolerable_replication_lag + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.tolerable_replication_lag = null; + /** * Creates a new PlannedReparentShardRequest instance using the specified properties. * @function create @@ -140411,6 +140420,8 @@ export const vtctldata = $root.vtctldata = (() => { $root.topodata.TabletAlias.encode(message.avoid_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.tolerable_replication_lag != null && Object.hasOwnProperty.call(message, "tolerable_replication_lag")) + $root.vttime.Duration.encode(message.tolerable_replication_lag, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; @@ -140465,6 +140476,10 @@ export const vtctldata = $root.vtctldata = (() => { message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); break; } + case 6: { + message.tolerable_replication_lag = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -140521,6 +140536,11 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "wait_replicas_timeout." + error; } + if (message.tolerable_replication_lag != null && message.hasOwnProperty("tolerable_replication_lag")) { + let error = $root.vttime.Duration.verify(message.tolerable_replication_lag); + if (error) + return "tolerable_replication_lag." 
+ error; + } return null; }; @@ -140555,6 +140575,11 @@ export const vtctldata = $root.vtctldata = (() => { throw TypeError(".vtctldata.PlannedReparentShardRequest.wait_replicas_timeout: object expected"); message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); } + if (object.tolerable_replication_lag != null) { + if (typeof object.tolerable_replication_lag !== "object") + throw TypeError(".vtctldata.PlannedReparentShardRequest.tolerable_replication_lag: object expected"); + message.tolerable_replication_lag = $root.vttime.Duration.fromObject(object.tolerable_replication_lag); + } return message; }; @@ -140577,6 +140602,7 @@ export const vtctldata = $root.vtctldata = (() => { object.new_primary = null; object.avoid_primary = null; object.wait_replicas_timeout = null; + object.tolerable_replication_lag = null; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; @@ -140588,6 +140614,8 @@ export const vtctldata = $root.vtctldata = (() => { object.avoid_primary = $root.topodata.TabletAlias.toObject(message.avoid_primary, options); if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.tolerable_replication_lag != null && message.hasOwnProperty("tolerable_replication_lag")) + object.tolerable_replication_lag = $root.vttime.Duration.toObject(message.tolerable_replication_lag, options); return object; };