From 071454f8173b30477070f6e944e9b9df56bb54fb Mon Sep 17 00:00:00 2001 From: Dirkjan Bussink Date: Tue, 19 Dec 2023 16:38:10 +0100 Subject: [PATCH] sqlparser: Refactor out servenv and inject everywhere (#14822) Signed-off-by: Dirkjan Bussink --- go/cmd/topo2topo/cli/topo2topo.go | 16 +- go/cmd/vtadmin/main.go | 11 +- go/cmd/vtcombo/cli/main.go | 22 +- go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go | 2 +- go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go | 2 +- go/cmd/vtcombo/cli/vschema_watcher.go | 2 +- go/cmd/vtctl/vtctl.go | 16 +- go/cmd/vtctld/cli/cli.go | 24 +- go/cmd/vtctld/cli/plugin_grpcvtctldserver.go | 2 +- go/cmd/vtctld/cli/plugin_grpcvtctlserver.go | 2 +- go/cmd/vtctld/cli/schema.go | 4 +- go/cmd/vtctldclient/command/root.go | 16 +- go/cmd/vtctldclient/command/schema.go | 3 +- .../command/vreplication/common/utils.go | 3 + .../command/vreplication/common/utils_test.go | 3 +- .../vreplication/materialize/create.go | 14 +- .../vreplication/materialize/materialize.go | 6 + .../vreplication/vdiff/vdiff_env_test.go | 3 +- go/cmd/vtexplain/cli/vtexplain.go | 11 +- go/cmd/vttablet/cli/cli.go | 34 +- go/mysql/auth_server_clientcert_test.go | 4 +- go/mysql/client.go | 2 +- go/mysql/client_test.go | 10 +- .../collations/integration/collations_test.go | 2 +- go/mysql/config/config.go | 1 + go/mysql/conn.go | 24 +- go/mysql/conn_fake.go | 2 +- go/mysql/conn_flaky_test.go | 17 +- go/mysql/conn_params.go | 2 + go/mysql/constants.go | 13 +- go/mysql/fakesqldb/server.go | 24 +- go/mysql/handshake_test.go | 4 +- go/mysql/mysql_fuzzer.go | 3 +- go/mysql/query.go | 13 +- go/mysql/server.go | 18 +- go/mysql/server_flaky_test.go | 53 +- go/mysql/sqlerror/sql_error.go | 3 +- go/mysql/streaming_query.go | 3 +- go/sqltypes/type.go | 8 +- go/sqltypes/type_test.go | 2 +- go/test/endtoend/cluster/mysqlctl_process.go | 10 - .../scheduler/onlineddl_scheduler_test.go | 3 +- .../vrepl/schemadiff_vrepl_suite_test.go | 10 +- go/test/endtoend/utils/mysql.go | 2 +- go/test/endtoend/vreplication/helper_test.go | 2 +- .../foreignkey/stress/fk_stress_test.go | 2 +- .../vtgate/queries/random/simplifier_test.go | 4 +- go/test/fuzzing/ast_fuzzer.go | 4 +- go/test/fuzzing/parser_fuzzer.go | 6 +- go/test/fuzzing/tabletserver_schema_fuzzer.go | 3 +- go/test/fuzzing/vt_schema_fuzzer.go | 4 +- go/test/fuzzing/vtctl_fuzzer.go | 3 +- go/test/vschemawrapper/vschema_wrapper.go | 6 +- go/vt/binlog/binlogplayer/dbclient.go | 12 +- go/vt/binlog/binlogplayer/mock_dbclient.go | 11 +- go/vt/binlog/keyspace_id_resolver.go | 12 +- go/vt/binlog/updatestreamctl.go | 7 +- go/vt/schema/online_ddl.go | 40 +- go/vt/schema/online_ddl_test.go | 46 +- go/vt/schemadiff/diff.go | 18 +- go/vt/schemadiff/diff_test.go | 38 +- go/vt/schemadiff/schema.go | 8 +- go/vt/schemadiff/schema_diff_test.go | 10 +- go/vt/schemadiff/schema_test.go | 40 +- go/vt/schemadiff/table_test.go | 18 +- go/vt/schemadiff/view_test.go | 10 +- go/vt/schemamanager/schemamanager_test.go | 16 +- go/vt/schemamanager/tablet_executor.go | 26 +- go/vt/schemamanager/tablet_executor_test.go | 13 +- go/vt/servenv/mysql.go | 6 +- go/vt/servenv/truncate_query.go | 34 + go/vt/sidecardb/sidecardb.go | 28 +- go/vt/sidecardb/sidecardb_test.go | 32 +- go/vt/sqlparser/analyzer.go | 4 +- go/vt/sqlparser/analyzer_test.go | 10 +- go/vt/sqlparser/ast_copy_on_rewrite_test.go | 15 +- go/vt/sqlparser/ast_rewriting_test.go | 24 +- go/vt/sqlparser/ast_test.go | 53 +- go/vt/sqlparser/cached_size.go | 2 +- go/vt/sqlparser/comments_test.go | 26 +- go/vt/sqlparser/keywords_test.go | 3 +- 
go/vt/sqlparser/like_filter_test.go | 15 +- go/vt/sqlparser/normalizer_test.go | 31 +- go/vt/sqlparser/parse_next_test.go | 18 +- go/vt/sqlparser/parse_table.go | 4 +- go/vt/sqlparser/parse_table_test.go | 3 +- go/vt/sqlparser/parse_test.go | 84 +- go/vt/sqlparser/parsed_query.go | 91 +- go/vt/sqlparser/parsed_query_test.go | 8 +- go/vt/sqlparser/parser.go | 112 +- go/vt/sqlparser/parser_test.go | 5 +- go/vt/sqlparser/precedence_test.go | 15 +- go/vt/sqlparser/predicate_rewriting_test.go | 9 +- go/vt/sqlparser/redact_query.go | 4 +- go/vt/sqlparser/redact_query_test.go | 3 +- go/vt/sqlparser/rewriter_test.go | 9 +- go/vt/sqlparser/token.go | 14 +- go/vt/sqlparser/token_test.go | 25 +- go/vt/sqlparser/tracked_buffer.go | 8 +- go/vt/sqlparser/tracked_buffer_test.go | 5 +- go/vt/sqlparser/truncate_query.go | 55 +- go/vt/sqlparser/truncate_query_test.go | 2 +- go/vt/sqlparser/utils.go | 14 +- go/vt/sqlparser/utils_test.go | 9 +- go/vt/throttler/demo/throttler_demo.go | 16 +- go/vt/topo/helpers/compare_test.go | 5 +- go/vt/topo/helpers/copy.go | 5 +- go/vt/topo/helpers/copy_test.go | 6 +- go/vt/topo/helpers/tee_test.go | 5 +- go/vt/vtadmin/api.go | 9 +- go/vt/vtadmin/api_authz_test.go | 103 +- go/vt/vtadmin/api_test.go | 70 +- .../vtadmin/testutil/authztestgen/template.go | 5 +- go/vt/vtadmin/testutil/cluster.go | 4 +- go/vt/vtcombo/tablet_map.go | 19 +- go/vt/vtctl/endtoend/get_schema_test.go | 5 +- go/vt/vtctl/endtoend/onlineddl_show_test.go | 4 +- go/vt/vtctl/grpcvtctlclient/client_test.go | 6 +- go/vt/vtctl/grpcvtctldclient/client_test.go | 7 +- .../endtoend/init_shard_primary_test.go | 9 +- go/vt/vtctl/grpcvtctldserver/server.go | 16 +- .../grpcvtctldserver/server_slow_test.go | 8 +- go/vt/vtctl/grpcvtctldserver/server_test.go | 189 +- .../testutil/test_tmclient.go | 2 +- go/vt/vtctl/grpcvtctlserver/server.go | 13 +- go/vt/vtctl/vtctl.go | 8 +- go/vt/vtctl/workflow/materializer.go | 16 +- go/vt/vtctl/workflow/materializer_env_test.go | 5 +- go/vt/vtctl/workflow/materializer_test.go | 8 +- go/vt/vtctl/workflow/server.go | 28 +- go/vt/vtctl/workflow/server_test.go | 4 +- go/vt/vtctl/workflow/stream_migrator.go | 6 +- go/vt/vtctl/workflow/stream_migrator_test.go | 4 +- go/vt/vtctl/workflow/utils.go | 8 +- .../workflow/vexec/query_planner_test.go | 2 +- go/vt/vtctl/workflow/vexec/testutil/query.go | 2 +- go/vt/vtctl/workflow/vexec/vexec.go | 8 +- go/vt/vtctld/action_repository.go | 12 +- go/vt/vtctld/api.go | 10 +- go/vt/vtctld/api_test.go | 6 +- go/vt/vtctld/tablet_data_test.go | 4 +- go/vt/vtctld/vtctld.go | 8 +- go/vt/vtexplain/vtexplain.go | 18 +- go/vt/vtexplain/vtexplain_test.go | 6 +- go/vt/vtexplain/vtexplain_vtgate.go | 6 +- go/vt/vtexplain/vtexplain_vttablet.go | 8 +- go/vt/vtexplain/vtexplain_vttablet_test.go | 17 +- go/vt/vtgate/engine/cached_size.go | 4 +- go/vt/vtgate/engine/ddl_test.go | 5 +- go/vt/vtgate/engine/fake_vcursor_test.go | 17 +- go/vt/vtgate/engine/insert_test.go | 44 +- go/vt/vtgate/engine/online_ddl.go | 7 +- go/vt/vtgate/engine/primitive.go | 1 + go/vt/vtgate/engine/revert_migration.go | 2 +- go/vt/vtgate/engine/set_test.go | 29 +- go/vt/vtgate/engine/update_test.go | 3 +- go/vt/vtgate/engine/vexplain.go | 2 +- go/vt/vtgate/evalengine/compiler_test.go | 14 +- .../evalengine/integration/fuzz_test.go | 4 +- go/vt/vtgate/evalengine/mysql_test.go | 2 +- go/vt/vtgate/evalengine/perf_test.go | 3 +- go/vt/vtgate/evalengine/translate_test.go | 10 +- go/vt/vtgate/executor.go | 34 +- go/vt/vtgate/executor_dml_test.go | 8 +- go/vt/vtgate/executor_framework_test.go | 33 +- 
go/vt/vtgate/executor_select_test.go | 22 +- go/vt/vtgate/executor_set_test.go | 16 +- go/vt/vtgate/executor_stream_test.go | 15 +- go/vt/vtgate/executor_test.go | 32 +- go/vt/vtgate/plan_execute.go | 4 +- go/vt/vtgate/planbuilder/builder.go | 2 +- go/vt/vtgate/planbuilder/ddl.go | 11 +- .../planbuilder/expression_converter_test.go | 2 +- go/vt/vtgate/planbuilder/operators/fuzz.go | 2 +- .../operators/queryprojection_test.go | 4 +- go/vt/vtgate/planbuilder/plan_test.go | 10 +- .../vtgate/planbuilder/plancontext/vschema.go | 3 + go/vt/vtgate/planbuilder/planner_test.go | 2 +- go/vt/vtgate/planbuilder/rewrite_test.go | 2 +- go/vt/vtgate/planbuilder/route.go | 2 +- go/vt/vtgate/planbuilder/select.go | 2 +- go/vt/vtgate/planbuilder/set.go | 2 +- go/vt/vtgate/planbuilder/show_test.go | 4 +- go/vt/vtgate/planbuilder/simplifier_test.go | 12 +- go/vt/vtgate/planbuilder/system_variables.go | 10 +- go/vt/vtgate/plugin_mysql_server.go | 7 + go/vt/vtgate/plugin_mysql_server_test.go | 16 +- go/vt/vtgate/querylogz.go | 2 +- go/vt/vtgate/queryz.go | 3 +- go/vt/vtgate/safe_session.go | 9 +- go/vt/vtgate/schema/tracker.go | 14 +- go/vt/vtgate/schema/tracker_test.go | 4 +- go/vt/vtgate/semantics/analyzer_test.go | 56 +- go/vt/vtgate/semantics/early_rewriter_test.go | 16 +- go/vt/vtgate/semantics/info_schema.go | 2534 +++++++++-------- go/vt/vtgate/semantics/semantic_state_test.go | 6 +- go/vt/vtgate/semantics/typer_test.go | 2 +- go/vt/vtgate/simplifier/simplifier_test.go | 4 +- go/vt/vtgate/vcursor_impl.go | 20 +- go/vt/vtgate/vcursor_impl_test.go | 13 +- go/vt/vtgate/vindexes/vschema.go | 36 +- go/vt/vtgate/vindexes/vschema_test.go | 94 +- go/vt/vtgate/vschema_manager.go | 7 +- go/vt/vtgate/vschema_manager_test.go | 2 +- go/vt/vtgate/vtgate.go | 30 +- go/vt/vttablet/endtoend/framework/client.go | 13 + go/vt/vttablet/endtoend/framework/server.go | 3 +- go/vt/vttablet/endtoend/misc_test.go | 67 +- go/vt/vttablet/onlineddl/analysis.go | 4 +- go/vt/vttablet/onlineddl/analysis_test.go | 5 +- go/vt/vttablet/onlineddl/executor.go | 52 +- go/vt/vttablet/onlineddl/executor_test.go | 29 +- go/vt/vttablet/onlineddl/vrepl.go | 7 +- go/vt/vttablet/onlineddl/vrepl/foreign_key.go | 3 +- .../onlineddl/vrepl/foreign_key_test.go | 4 +- go/vt/vttablet/onlineddl/vrepl/parser.go | 4 +- go/vt/vttablet/onlineddl/vrepl/parser_test.go | 22 +- go/vt/vttablet/sandboxconn/sandboxconn.go | 7 +- go/vt/vttablet/tabletmanager/restore.go | 2 +- go/vt/vttablet/tabletmanager/rpc_query.go | 10 +- .../tabletmanager/rpc_vreplication.go | 2 +- .../tabletmanager/rpc_vreplication_test.go | 8 +- go/vt/vttablet/tabletmanager/tm_init.go | 2 + go/vt/vttablet/tabletmanager/vdiff/engine.go | 10 +- .../tabletmanager/vdiff/framework_test.go | 2 +- go/vt/vttablet/tabletmanager/vdiff/report.go | 2 +- .../tabletmanager/vdiff/table_plan.go | 2 +- .../vreplication/controller_plan.go | 4 +- .../vreplication/controller_plan_test.go | 4 +- .../tabletmanager/vreplication/engine.go | 19 +- .../vreplication/external_connector.go | 7 +- .../vreplication/framework_test.go | 5 +- .../vreplication/replica_connector.go | 5 +- .../vreplication/replicator_plan.go | 73 +- .../vreplication/replicator_plan_test.go | 13 +- .../vreplication/table_plan_builder.go | 12 +- .../tabletmanager/vreplication/vcopier.go | 4 +- .../vreplication/vcopier_atomic.go | 2 +- .../tabletmanager/vreplication/vplayer.go | 2 +- .../tabletmanager/vreplication/vreplicator.go | 4 +- .../vreplication/vreplicator_test.go | 3 +- .../vttablet/tabletserver/connpool/dbconn.go | 7 +- 
.../tabletserver/connpool/dbconn_test.go | 5 +- go/vt/vttablet/tabletserver/connpool/pool.go | 2 +- .../tabletserver/connpool/pool_test.go | 5 +- .../tabletserver/exclude_race_test.go | 12 +- go/vt/vttablet/tabletserver/fuzz.go | 3 +- .../tabletserver/health_streamer_test.go | 10 +- .../vttablet/tabletserver/livequeryz_test.go | 15 +- .../tabletserver/messager/engine_test.go | 2 +- .../messager/message_manager_test.go | 2 +- .../planbuilder/permission_test.go | 2 +- .../vttablet/tabletserver/planbuilder/plan.go | 4 +- .../tabletserver/planbuilder/plan_test.go | 15 +- .../planbuilder/testdata/exec_cases.txt | 20 +- go/vt/vttablet/tabletserver/query_engine.go | 12 +- .../tabletserver/query_engine_test.go | 12 +- go/vt/vttablet/tabletserver/query_executor.go | 8 +- .../tabletserver/query_executor_test.go | 8 +- go/vt/vttablet/tabletserver/query_list.go | 7 +- .../vttablet/tabletserver/query_list_test.go | 6 +- go/vt/vttablet/tabletserver/querylogz.go | 2 +- go/vt/vttablet/tabletserver/queryz.go | 3 +- .../tabletserver/repltracker/reader_test.go | 8 +- .../repltracker/repltracker_test.go | 4 +- .../tabletserver/repltracker/writer_test.go | 4 +- go/vt/vttablet/tabletserver/schema/db.go | 46 +- go/vt/vttablet/tabletserver/schema/db_test.go | 39 +- go/vt/vttablet/tabletserver/schema/engine.go | 10 +- .../tabletserver/schema/engine_test.go | 17 +- .../tabletserver/schema/load_table_test.go | 2 +- go/vt/vttablet/tabletserver/schema/tracker.go | 8 +- .../tabletserver/schema/tracker_test.go | 7 +- .../tabletserver/state_manager_test.go | 15 +- .../tabletserver/stateful_connection.go | 5 +- .../tabletserver/stateful_connection_pool.go | 2 +- go/vt/vttablet/tabletserver/tabletenv/env.go | 7 +- go/vt/vttablet/tabletserver/tabletserver.go | 40 +- .../tabletserver/tabletserver_test.go | 81 +- .../tabletserver/throttle/throttler_test.go | 4 +- go/vt/vttablet/tabletserver/tx/api.go | 4 +- go/vt/vttablet/tabletserver/tx_engine_test.go | 11 +- go/vt/vttablet/tabletserver/tx_pool.go | 2 +- go/vt/vttablet/tabletserver/tx_pool_test.go | 3 +- .../tabletserver/txlimiter/tx_limiter_test.go | 7 +- .../txserializer/tx_serializer_test.go | 21 +- .../txthrottler/tx_throttler_test.go | 8 +- .../vttablet/tabletserver/vstreamer/engine.go | 2 +- go/vt/vttablet/tabletserver/vstreamer/fuzz.go | 3 +- .../vstreamer/local_vschema_test.go | 7 +- .../tabletserver/vstreamer/main_flaky_test.go | 3 +- .../tabletserver/vstreamer/planbuilder.go | 16 +- .../vstreamer/planbuilder_test.go | 20 +- .../tabletserver/vstreamer/resultstreamer.go | 2 +- .../tabletserver/vstreamer/rowstreamer.go | 4 +- .../tabletserver/vstreamer/testenv/testenv.go | 3 +- .../tabletserver/vstreamer/vstreamer.go | 10 +- go/vt/vttest/local_cluster.go | 4 +- go/vt/wrangler/external_cluster_test.go | 3 +- go/vt/wrangler/fake_dbclient_test.go | 2 +- go/vt/wrangler/fake_tablet_test.go | 16 +- go/vt/wrangler/materializer.go | 27 +- go/vt/wrangler/materializer_env_test.go | 5 +- go/vt/wrangler/materializer_test.go | 10 +- go/vt/wrangler/reparent.go | 2 +- go/vt/wrangler/resharder_env_test.go | 3 +- go/vt/wrangler/tablet_test.go | 11 +- go/vt/wrangler/testlib/backup_test.go | 9 +- .../testlib/copy_schema_shard_test.go | 3 +- .../testlib/emergency_reparent_shard_test.go | 5 +- .../testlib/external_reparent_test.go | 18 +- go/vt/wrangler/testlib/fake_tablet.go | 2 + go/vt/wrangler/testlib/find_tablet_test.go | 3 +- go/vt/wrangler/testlib/permissions_test.go | 3 +- .../testlib/planned_reparent_shard_test.go | 19 +- go/vt/wrangler/testlib/reparent_utils_test.go | 7 +- 
go/vt/wrangler/testlib/shard_test.go | 3 +- go/vt/wrangler/testlib/version_test.go | 3 +- go/vt/wrangler/testlib/vtctl_pipe.go | 3 +- go/vt/wrangler/traffic_switcher.go | 8 +- go/vt/wrangler/traffic_switcher_env_test.go | 6 +- go/vt/wrangler/vdiff.go | 18 +- go/vt/wrangler/vdiff_env_test.go | 3 +- go/vt/wrangler/vdiff_test.go | 4 +- go/vt/wrangler/vexec.go | 4 +- go/vt/wrangler/vexec_plan.go | 4 +- go/vt/wrangler/vexec_test.go | 11 +- go/vt/wrangler/wrangler.go | 13 +- go/vt/wrangler/wrangler_env_test.go | 3 +- 329 files changed, 3654 insertions(+), 3109 deletions(-) create mode 100644 go/vt/servenv/truncate_query.go diff --git a/go/cmd/topo2topo/cli/topo2topo.go b/go/cmd/topo2topo/cli/topo2topo.go index 6e7e173872b..5dda62eaed1 100644 --- a/go/cmd/topo2topo/cli/topo2topo.go +++ b/go/cmd/topo2topo/cli/topo2topo.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/helpers" ) @@ -94,12 +95,21 @@ func run(cmd *cobra.Command, args []string) error { return compareTopos(ctx, fromTS, toTS) } - return copyTopos(ctx, fromTS, toTS) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot create sqlparser: %w", err) + } + + return copyTopos(ctx, fromTS, toTS, parser) } -func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) error { +func copyTopos(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { if doKeyspaces { - if err := helpers.CopyKeyspaces(ctx, fromTS, toTS); err != nil { + if err := helpers.CopyKeyspaces(ctx, fromTS, toTS, parser); err != nil { return err } } diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index ff76fe2f3b6..224a6dbeacf 100644 --- a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cache" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -140,12 +141,20 @@ func run(cmd *cobra.Command, args []string) { cache.SetCacheRefreshKey(cacheRefreshKey) collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + fatal(err) + } s := vtadmin.NewAPI(clusters, vtadmin.Options{ GRPCOpts: opts, HTTPOpts: httpOpts, RBAC: rbacConfig, EnableDynamicClusters: enableDynamicClusters, - }, collationEnv) + }, collationEnv, parser) bootSpan.Finish() if err := s.ListenAndServe(); err != nil { diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go index c9fde3d2252..35620e2bd9a 100644 --- a/go/cmd/vtcombo/cli/main.go +++ b/go/cmd/vtcombo/cli/main.go @@ -30,15 +30,15 @@ import ( "github.com/spf13/cobra" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -82,6 +82,7 @@ In particular, it contains: ts *topo.Server collationEnv *collations.Environment resilientServer *srvtopo.ResilientServer + parser *sqlparser.Parser ) func init() { @@ -191,6 +192,15 @@ func run(cmd *cobra.Command, args []string) (err error) { servenv.Init() tabletenv.Init() + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("failed to initialize sql parser: %w", err) + } + var ( mysqld = &vtcomboMysqld{} cnf *mysqlctl.Mycnf @@ -222,7 +232,7 @@ func run(cmd *cobra.Command, args []string) (err error) { // to be the "internal" protocol that InitTabletMap registers. cmd.Flags().Set("tablet_manager_protocol", "internal") cmd.Flags().Set("tablet_protocol", "internal") - uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql, collationEnv) + uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql, collationEnv, parser) if err != nil { // ensure we start mysql in the event we fail here if startMysql { @@ -247,8 +257,8 @@ func run(cmd *cobra.Command, args []string) (err error) { } } - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv) - newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr, collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv, parser) + newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr, collationEnv, parser) if err != nil { return err } @@ -301,7 +311,7 @@ func run(cmd *cobra.Command, args []string) (err error) { vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion, collationEnv) // vtctld configuration and init - err = vtctld.InitVtctld(ts, collationEnv) + err = vtctld.InitVtctld(ts, collationEnv, parser) if err != nil { return err } diff --git a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go index 2cf8eed8368..62a5e2bb358 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + grpcvtctldserver.StartServer(servenv.GRPCServer, ts, parser) } }) } diff --git a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go index d815805f60a..e7f7b1b7302 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts, collationEnv) + grpcvtctlserver.StartServer(servenv.GRPCServer, ts, collationEnv, parser) } }) } diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go index c1c9f120b96..e573109ab9e 100644 --- a/go/cmd/vtcombo/cli/vschema_watcher.go +++ b/go/cmd/vtcombo/cli/vschema_watcher.go @@ -63,7 +63,7 @@ func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.S log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) } 
- _, err = vindexes.BuildKeyspace(keyspace) + _, err = vindexes.BuildKeyspace(keyspace, parser) if err != nil { log.Fatalf("Invalid keyspace definition: %v", err) } diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index b44938eb776..8979028ea23 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -36,6 +36,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -130,6 +131,15 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), waitTime) installSignalHandlers(cancel) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("cannot initialize sql parser: %v", err) + } + // (TODO:ajm188) . // // For v12, we are going to support new commands by prefixing as: @@ -154,7 +164,7 @@ func main() { // New behavior. Strip off the prefix, and set things up to run through // the vtctldclient command tree, using the localvtctldclient (in-process) // client. - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, parser) localvtctldclient.SetServer(vtctld) command.VtctldClientProtocol = "local" @@ -171,7 +181,7 @@ func main() { default: log.Warningf("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) if args[0] == "--" { vtctl.PrintDoubleDashDeprecationNotice(wr) @@ -179,7 +189,7 @@ func main() { } action = args[0] - err := vtctl.RunCommand(ctx, wr, args) + err = vtctl.RunCommand(ctx, wr, args) cancel() switch err { case vtctl.ErrUnknownCommand: diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go index 86d6b0adf0d..b0135707512 100644 --- a/go/cmd/vtctld/cli/cli.go +++ b/go/cmd/vtctld/cli/cli.go @@ -21,7 +21,9 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctld" ) @@ -29,6 +31,7 @@ import ( var ( ts *topo.Server collationEnv *collations.Environment + parser *sqlparser.Parser Main = &cobra.Command{ Use: "vtctld", Short: "The Vitess cluster management daemon.", @@ -61,9 +64,18 @@ func run(cmd *cobra.Command, args []string) error { ts = topo.Open() defer ts.Close() + var err error collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } // Init the vtctld core - if err := vtctld.InitVtctld(ts, collationEnv); err != nil { + if err := vtctld.InitVtctld(ts, collationEnv, parser); err != nil { return err } @@ -89,4 +101,14 @@ func init() { servenv.MoveFlagsToCobraCommand(Main) acl.RegisterFlags(Main.Flags()) + + var err error + parser, err = sqlparser.New(sqlparser.Options{ + 
MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("cannot initialize sql parser: %v", err) + } } diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go index ff283d91336..3385160e9f8 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + grpcvtctldserver.StartServer(servenv.GRPCServer, ts, parser) } }) } diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go index d815805f60a..e7f7b1b7302 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts, collationEnv) + grpcvtctlserver.StartServer(servenv.GRPCServer, ts, collationEnv, parser) } }) } diff --git a/go/cmd/vtctld/cli/schema.go b/go/cmd/vtctld/cli/schema.go index 31d51d5be9f..9092dbf03bd 100644 --- a/go/cmd/vtctld/cli/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -71,11 +71,11 @@ func initSchema() { return } ctx := context.Background() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) _, err = schemamanager.Run( ctx, controller, - schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0), + schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0, parser), ) if err != nil { log.Errorf("Schema change failed, error: %v", err) diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go index a5848a7b42a..9e6b2df170b 100644 --- a/go/cmd/vtctldclient/command/root.go +++ b/go/cmd/vtctldclient/command/root.go @@ -29,8 +29,10 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" @@ -80,6 +82,8 @@ var ( actionTimeout time.Duration compactOutput bool + parser *sqlparser.Parser + topoOptions = struct { implementation string globalServerAddresses []string @@ -208,7 +212,7 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) return nil }) }) - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, parser) localvtctldclient.SetServer(vtctld) VtctldClientProtocol = "local" server = "" @@ -225,4 +229,14 @@ func init() { Root.PersistentFlags().StringSliceVar(&topoOptions.globalServerAddresses, "topo-global-server-address", topoOptions.globalServerAddresses, "the address of the global topology server(s)") Root.PersistentFlags().StringVar(&topoOptions.globalRoot, "topo-global-root", topoOptions.globalRoot, "the path of the global topology data in the global topology server") vreplcommon.RegisterCommands(Root) + + var err error + parser, err = 
sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("failed to initialize sqlparser: %v", err) + } } diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index 2d31e3500c1..4a46108ba26 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ b/go/cmd/vtctldclient/command/schema.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -123,7 +122,7 @@ func commandApplySchema(cmd *cobra.Command, args []string) error { allSQL = strings.Join(applySchemaOptions.SQL, ";") } - parts, err := sqlparser.SplitStatementToPieces(allSQL) + parts, err := parser.SplitStatementToPieces(allSQL) if err != nil { return err } diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go index da6e3329579..02dc88ae769 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go @@ -64,6 +64,9 @@ var ( DeferSecondaryKeys bool AutoStart bool StopAfterCopy bool + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int }{} ) diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go index 0dc179060d6..0660cb6d742 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/command" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -144,7 +145,7 @@ func SetupLocalVtctldClient(t *testing.T, ctx context.Context, cells ...string) tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { return nil }) - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) localvtctldclient.SetServer(vtctld) command.VtctldClientProtocol = "local" client, err := vtctldclient.New(command.VtctldClientProtocol, "") diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/create.go b/go/cmd/vtctldclient/command/vreplication/materialize/create.go index 51f3ee42ee9..88aed1c664c 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/create.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/create.go @@ -102,6 +102,15 @@ func commandCreate(cmd *cobra.Command, args []string) error { TabletSelectionPreference: tsp, } + createOptions.TableSettings.parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: common.CreateOptions.MySQLServerVersion, + TruncateUILen: common.CreateOptions.TruncateUILen, + TruncateErrLen: common.CreateOptions.TruncateErrLen, + }) + if err != nil { + return err + } + req := &vtctldatapb.MaterializeCreateRequest{ Settings: ms, } @@ -132,7 +141,8 @@ func commandCreate(cmd *cobra.Command, args []string) error { // tableSettings is a wrapper around a slice of TableMaterializeSettings // proto messages that implements the pflag.Value interface. 
type tableSettings struct { - val []*vtctldatapb.TableMaterializeSettings + val []*vtctldatapb.TableMaterializeSettings + parser *sqlparser.Parser } func (ts *tableSettings) String() string { @@ -157,7 +167,7 @@ func (ts *tableSettings) Set(v string) error { return fmt.Errorf("missing target_table or source_expression") } // Validate that the query is valid. - stmt, err := sqlparser.Parse(tms.SourceExpression) + stmt, err := ts.parser.Parse(tms.SourceExpression) if err != nil { return fmt.Errorf("invalid source_expression: %q", tms.SourceExpression) } diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go index 58be1ec4433..5845504af3f 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go @@ -17,9 +17,12 @@ limitations under the License. package materialize import ( + "fmt" + "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/topo/topoproto" ) @@ -46,6 +49,9 @@ func registerCommands(root *cobra.Command) { create.Flags().Var(&createOptions.TableSettings, "table-settings", "A JSON array defining what tables to materialize using what select statements. See the --help output for more details.") create.MarkFlagRequired("table-settings") create.Flags().BoolVar(&common.CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.") + create.Flags().StringVar(&common.CreateOptions.MySQLServerVersion, "mysql_server_version", fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), "Configure the MySQL version to use for example for the parser.") + create.Flags().IntVar(&common.CreateOptions.TruncateUILen, "sql-max-length-ui", 512, "truncate queries in debug UIs to the given length (default 512)") + create.Flags().IntVar(&common.CreateOptions.TruncateErrLen, "sql-max-length-errors", 0, "truncate queries in error logs to the given length (default unlimited)") base.AddCommand(create) // Generic workflow commands. diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go index 1a2a374cf81..23a4f2e0bbd 100644 --- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/workflow" @@ -83,7 +84,7 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.ws = workflow.NewServer(env.topoServ, env.tmc) + env.ws = workflow.NewServer(env.topoServ, env.tmc, sqlparser.NewTestParser()) env.tmc.testEnv = env // Generate a unique dialer name. 
diff --git a/go/cmd/vtexplain/cli/vtexplain.go b/go/cmd/vtexplain/cli/vtexplain.go index fdda3061492..ee71336a8d7 100644 --- a/go/cmd/vtexplain/cli/vtexplain.go +++ b/go/cmd/vtexplain/cli/vtexplain.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtexplain" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -175,7 +176,15 @@ func parseAndRun() error { } collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) - vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts, collationEnv) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } + vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts, collationEnv, parser) if err != nil { return err } diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index 7efa66e58c8..80c1a904419 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -25,14 +25,14 @@ import ( "github.com/spf13/cobra" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/binlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/tableacl/simpleacl" "vitess.io/vitess/go/vt/topo" @@ -112,6 +112,15 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to parse --tablet-path: %w", err) } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot initialize sql parser: %w", err) + } + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) // config and mycnf initializations are intertwined. config, mycnf, err := initConfig(tabletAlias, collationEnv) @@ -120,7 +129,7 @@ func run(cmd *cobra.Command, args []string) error { } ts := topo.Open() - qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias, collationEnv) + qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias, collationEnv, parser) if err != nil { ts.Close() return err @@ -134,6 +143,14 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to extract online DDL binaries: %w", err) } + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot initialize sql parser: %w", err) + } // Initialize and start tm. 
gRPCPort := int32(0) if servenv.GRPCPort() != 0 { @@ -150,10 +167,11 @@ func run(cmd *cobra.Command, args []string) error { MysqlDaemon: mysqld, DBConfigs: config.DB.Clone(), QueryServiceControl: qsc, - UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler(), collationEnv), - VDiffEngine: vdiff.NewEngine(config, ts, tablet, collationEnv), + UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine(), parser), + VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler(), collationEnv, parser), + VDiffEngine: vdiff.NewEngine(ts, tablet, collationEnv, parser), CollationEnv: collationEnv, + SQLParser: parser, } if err := tm.Start(tablet, config); err != nil { ts.Close() @@ -241,7 +259,7 @@ func extractOnlineDDL() error { return nil } -func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, collationEnv *collations.Environment) (*tabletserver.TabletServer, error) { +func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) (*tabletserver.TabletServer, error) { if tableACLConfig != "" { // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory tableacl.Register("simpleacl", &simpleacl.Factory{}) @@ -250,7 +268,7 @@ func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts } // creates and registers the query service - qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias, collationEnv) + qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias, collationEnv, parser) servenv.OnRun(func() { qsc.Register() addStatusParts(qsc) diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 3314116e953..ca32bbfc7ee 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -39,7 +39,7 @@ func TestValidCert(t *testing.T) { authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -108,7 +108,7 @@ func TestNoCert(t *testing.T) { authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/client.go b/go/mysql/client.go index 108ef6f774b..16740bf38db 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -106,7 +106,7 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) { } // Send the connection back, so the other side can close it. 
- c := newConn(conn, params.FlushDelay) + c := newConn(conn, params.FlushDelay, params.TruncateErrLen) status <- connectResult{ c: c, } diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go index 057a8584679..5e9a634c13f 100644 --- a/go/mysql/client_test.go +++ b/go/mysql/client_test.go @@ -151,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -223,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -296,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -343,7 +343,7 @@ func TestTLSClientVerifyCA(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -426,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go index 8db7140d6c1..519f4560faf 100644 --- a/go/mysql/collations/integration/collations_test.go +++ b/go/mysql/collations/integration/collations_test.go @@ -60,7 +60,7 @@ func getSQLQueries(t *testing.T, testfile string) []string { addchunk := func() { if curchunk.Len() > 0 { - stmts, err := sqlparser.SplitStatementToPieces(curchunk.String()) + stmts, err := sqlparser.NewTestParser().SplitStatementToPieces(curchunk.String()) if err != nil { t.Fatal(err) } diff --git a/go/mysql/config/config.go b/go/mysql/config/config.go index 8abf9d7dc71..cc08107f0a3 100644 --- a/go/mysql/config/config.go +++ b/go/mysql/config/config.go @@ -1,3 +1,4 @@ package config const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" +const DefaultMySQLVersion = "8.0.30" diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 402906b9b75..4dcf87c4867 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -215,6 +215,8 @@ type Conn struct { // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and // the connection gets closed. closing bool + + truncateErrLen int } // PrepareData is a buffer used for store prepare statement meta data @@ -246,7 +248,7 @@ var readersPool = sync.Pool{New: func() any { return bufio.NewReaderSize(nil, co // newConn is an internal method to create a Conn. Used by client and server // side for common creation code. 
-func newConn(conn net.Conn, flushDelay time.Duration) *Conn { +func newConn(conn net.Conn, flushDelay time.Duration, truncateErrLen int) *Conn { if flushDelay == 0 { flushDelay = DefaultFlushDelay } @@ -254,6 +256,7 @@ func newConn(conn net.Conn, flushDelay time.Duration) *Conn { conn: conn, bufferedReader: bufio.NewReaderSize(conn, connBufferSize), flushDelay: flushDelay, + truncateErrLen: truncateErrLen, } } @@ -274,11 +277,12 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { } c := &Conn{ - conn: conn, - listener: listener, - PrepareData: make(map[uint32]*PrepareData), - keepAliveOn: enabledKeepAlive, - flushDelay: listener.flushDelay, + conn: conn, + listener: listener, + PrepareData: make(map[uint32]*PrepareData), + keepAliveOn: enabledKeepAlive, + flushDelay: listener.flushDelay, + truncateErrLen: listener.truncateErrLen, } if listener.connReadBufferSize > 0 { @@ -1232,7 +1236,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { var queries []string if c.Capabilities&CapabilityClientMultiStatements != 0 { var err error - queries, err = sqlparser.SplitStatementToPieces(query) + queries, err = handler.SQLParser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) @@ -1245,14 +1249,14 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { queries = []string{query} } - // Popoulate PrepareData + // Populate PrepareData c.StatementID++ prepare := &PrepareData{ StatementID: c.StatementID, PrepareStmt: queries[0], } - statement, err := sqlparser.ParseStrictDDL(query) + statement, err := handler.SQLParser().ParseStrictDDL(query) if err != nil { log.Errorf("Conn %v: Error parsing prepared statement: %v", c, err) if !c.writeErrorPacketFromErrorAndLog(err) { @@ -1360,7 +1364,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { var queries []string var err error if c.Capabilities&CapabilityClientMultiStatements != 0 { - queries, err = sqlparser.SplitStatementToPieces(query) + queries, err = handler.SQLParser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go index c20d09a2f6d..7bc4fd5ff61 100644 --- a/go/mysql/conn_fake.go +++ b/go/mysql/conn_fake.go @@ -84,7 +84,7 @@ var _ net.Addr = (*mockAddress)(nil) // GetTestConn returns a conn for testing purpose only. func GetTestConn() *Conn { - return newConn(testConn{}, DefaultFlushDelay) + return newConn(testConn{}, DefaultFlushDelay, 0) } // GetTestServerConn is only meant to be used for testing. diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go index e829131698b..da82a577753 100644 --- a/go/mysql/conn_flaky_test.go +++ b/go/mysql/conn_flaky_test.go @@ -39,6 +39,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { @@ -73,8 +74,8 @@ func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { require.Nil(t, serverErr, "Accept failed: %v", serverErr) // Create a Conn on both sides. 
- cConn := newConn(clientConn, DefaultFlushDelay) - sConn := newConn(serverConn, DefaultFlushDelay) + cConn := newConn(clientConn, DefaultFlushDelay, 0) + sConn := newConn(serverConn, DefaultFlushDelay, 0) sConn.PrepareData = map[uint32]*PrepareData{} return listener, sConn, cConn @@ -930,7 +931,7 @@ func TestConnectionErrorWhileWritingComQuery(t *testing.T) { pos: -1, queryPacket: []byte{0x21, 0x00, 0x00, 0x00, ComQuery, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x20, 0x40, 0x40, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x20, 0x31}, - }, DefaultFlushDelay) + }, DefaultFlushDelay, 0) // this handler will return an error on the first run, and fail the test if it's run more times errorString := make([]byte, 17000) @@ -946,7 +947,7 @@ func TestConnectionErrorWhileWritingComStmtSendLongData(t *testing.T) { pos: -1, queryPacket: []byte{0x21, 0x00, 0x00, 0x00, ComStmtSendLongData, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x20, 0x40, 0x40, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x20, 0x31}, - }, DefaultFlushDelay) + }, DefaultFlushDelay, 0) // this handler will return an error on the first run, and fail the test if it's run more times handler := &testRun{t: t, err: fmt.Errorf("not used")} @@ -960,7 +961,7 @@ func TestConnectionErrorWhileWritingComPrepare(t *testing.T) { writeToPass: []bool{false}, pos: -1, queryPacket: []byte{0x01, 0x00, 0x00, 0x00, ComPrepare}, - }, DefaultFlushDelay) + }, DefaultFlushDelay, 0) sConn.Capabilities = sConn.Capabilities | CapabilityClientMultiStatements // this handler will return an error on the first run, and fail the test if it's run more times handler := &testRun{t: t, err: fmt.Errorf("not used")} @@ -975,7 +976,7 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) { pos: -1, queryPacket: []byte{0x21, 0x00, 0x00, 0x00, ComStmtExecute, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x20, 0x40, 0x40, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x20, 0x31}, - }, DefaultFlushDelay) + }, DefaultFlushDelay, 0) // this handler will return an error on the first run, and fail the test if it's run more times handler := &testRun{t: t, err: fmt.Errorf("not used")} res := sConn.handleNextCommand(handler) @@ -1138,4 +1139,8 @@ func (t testRun) WarningCount(c *Conn) uint16 { return 0 } +func (t testRun) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + var _ Handler = (*testRun)(nil) diff --git a/go/mysql/conn_params.go b/go/mysql/conn_params.go index 2476718355a..46e733f6021 100644 --- a/go/mysql/conn_params.go +++ b/go/mysql/conn_params.go @@ -63,6 +63,8 @@ type ConnParams struct { // FlushDelay is the delay after which buffered response will be flushed to the client. FlushDelay time.Duration + + TruncateErrLen int } // EnableSSL will set the right flag on the parameters. diff --git a/go/mysql/constants.go b/go/mysql/constants.go index 194ed568b39..1d848c7b8ac 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -17,7 +17,7 @@ limitations under the License. 
package mysql import ( - "vitess.io/vitess/go/mysql/binlog" + "vitess.io/vitess/go/sqltypes" ) const ( @@ -274,10 +274,15 @@ const ( AuthSwitchRequestPacket = 0xfe ) +var typeInt24, _ = sqltypes.TypeToMySQL(sqltypes.Int24) +var typeTimestamp, _ = sqltypes.TypeToMySQL(sqltypes.Timestamp) +var typeYear, _ = sqltypes.TypeToMySQL(sqltypes.Year) +var typeNewDecimal, _ = sqltypes.TypeToMySQL(sqltypes.Decimal) + // IsNum returns true if a MySQL type is a numeric value. // It is the same as IS_NUM defined in mysql.h. func IsNum(typ uint8) bool { - return (typ <= binlog.TypeInt24 && typ != binlog.TypeTimestamp) || - typ == binlog.TypeYear || - typ == binlog.TypeNewDecimal + return (typ <= typeInt24 && typ != typeTimestamp) || + typ == typeYear || + typ == typeNewDecimal } diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index 097e4101b92..e522f0ad5af 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -29,15 +29,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/config" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" - + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) const appendEntry = -1 @@ -128,6 +126,8 @@ type DB struct { // lastError stores the last error in returning a query result. lastErrorMu sync.Mutex lastError error + + parser *sqlparser.Parser } // QueryHandler is the interface used by the DB to simulate executed queries @@ -181,6 +181,7 @@ func New(t testing.TB) *DB { queryPatternUserCallback: make(map[*regexp.Regexp]func(string)), patternData: make(map[string]exprResult), lastErrorMu: sync.Mutex{}, + parser: sqlparser.NewTestParser(), } db.Handler = db @@ -188,7 +189,7 @@ func New(t testing.TB) *DB { authServer := mysql.NewAuthServerNone() // Start listening. - db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0, 0, "8.0.30-Vitess") + db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0, 0, fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -432,9 +433,10 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R return callback(&sqltypes.Result{}) } // Nothing matched. + parser := sqlparser.NewTestParser() err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", - sqlparser.TruncateForUI(query), db.name) - log.Errorf("Query not found: %s", sqlparser.TruncateForUI(query)) + parser.TruncateForUI(query), db.name) + log.Errorf("Query not found: %s", parser.TruncateForUI(query)) return err } @@ -838,3 +840,7 @@ func (db *DB) GetQueryPatternResult(key string) (func(string), ExpectedResult, b return nil, ExpectedResult{nil, nil}, false, nil } + +func (db *DB) SQLParser() *sqlparser.Parser { + return db.parser +} diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index 57ed604daae..db65012268a 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -45,7 +45,7 @@ func TestClearTextClientAuth(t *testing.T) { defer authServer.close() // Create the listener. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -99,7 +99,7 @@ func TestSSLConnection(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/mysql_fuzzer.go b/go/mysql/mysql_fuzzer.go index 057f2ac01c3..7370ad8a479 100644 --- a/go/mysql/mysql_fuzzer.go +++ b/go/mysql/mysql_fuzzer.go @@ -31,6 +31,7 @@ import ( gofuzzheaders "github.com/AdaLogics/go-fuzz-headers" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/tlstest" @@ -327,7 +328,7 @@ func FuzzTLSServer(data []byte) int { Password: "password1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, 0, "8.0.30-Vitess") + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, 0, DefaultFlushDelay, fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), 512, 0) if err != nil { return -1 } diff --git a/go/mysql/query.go b/go/mysql/query.go index 7cfeafd258f..758fa7cfe52 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" @@ -147,7 +148,7 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { } // Convert MySQL type to Vitess type. - field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) + field.Type, err = sqltypes.MySQLToType(t, int64(flags)) if err != nil { return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } @@ -243,7 +244,7 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { } // Convert MySQL type to Vitess type. - field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) + field.Type, err = sqltypes.MySQLToType(t, int64(flags)) if err != nil { return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } @@ -313,7 +314,7 @@ func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (re defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() @@ -337,7 +338,7 @@ func (c *Conn) ExecuteFetchWithWarningCount(query string, maxrows int, wantfield defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() @@ -596,7 +597,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b } // convert MySQL type to internal type. 
- valType, err := sqltypes.MySQLToType(int64(mysqlType), int64(flags)) + valType, err := sqltypes.MySQLToType(mysqlType, int64(flags)) if err != nil { return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) } @@ -930,7 +931,7 @@ func (c *Conn) writeColumnDefinition(field *querypb.Field) error { pos = writeByte(data, pos, 0x0c) pos = writeUint16(data, pos, uint16(field.Charset)) pos = writeUint32(data, pos, field.ColumnLength) - pos = writeByte(data, pos, byte(typ)) + pos = writeByte(data, pos, typ) pos = writeUint16(data, pos, uint16(flags)) pos = writeByte(data, pos, byte(field.Decimals)) pos = writeUint16(data, pos, uint16(0x0000)) diff --git a/go/mysql/server.go b/go/mysql/server.go index a5fde0c19c8..ddc9fe06ff6 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -27,10 +27,9 @@ import ( "github.com/pires/go-proxyproto" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" @@ -39,6 +38,7 @@ import ( "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" ) @@ -132,6 +132,8 @@ type Handler interface { WarningCount(c *Conn) uint16 ComResetConnection(c *Conn) + + SQLParser() *sqlparser.Parser } // UnimplementedHandler implemnts all of the optional callbacks so as to satisy @@ -217,6 +219,8 @@ type Listener struct { // charset is the default server side character set to use for the connection charset collations.ID + // parser to use for this listener, configured with the correct version. + truncateErrLen int } // NewFromListener creates a new mysql listener from an existing net.Listener @@ -230,6 +234,8 @@ func NewFromListener( keepAlivePeriod time.Duration, flushDelay time.Duration, mysqlServerVersion string, + truncateErrLen int, + ) (*Listener, error) { cfg := ListenerConfig{ Listener: l, @@ -242,6 +248,7 @@ func NewFromListener( ConnKeepAlivePeriod: keepAlivePeriod, FlushDelay: flushDelay, MySQLServerVersion: mysqlServerVersion, + TruncateErrLen: truncateErrLen, } return NewListenerWithConfig(cfg) } @@ -258,6 +265,7 @@ func NewListener( keepAlivePeriod time.Duration, flushDelay time.Duration, mysqlServerVersion string, + truncateErrLen int, ) (*Listener, error) { listener, err := net.Listen(protocol, address) if err != nil { @@ -265,10 +273,10 @@ func NewListener( } if proxyProtocol { proxyListener := &proxyproto.Listener{Listener: listener} - return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay, mysqlServerVersion) + return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay, mysqlServerVersion, truncateErrLen) } - return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay, mysqlServerVersion) + return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay, mysqlServerVersion, truncateErrLen) } // ListenerConfig should be used with NewListenerWithConfig to specify listener parameters. 
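Because SQLParser() is now part of the Handler interface above, every server-side handler has to surface the parser it was built with, and the connection code can stop consulting package-level sqlparser state. A partial sketch (proxyHandler is a made-up name; a real implementation still needs ComQuery and the other required callbacks):

// Assumed imports: "vitess.io/vitess/go/mysql", "vitess.io/vitess/go/vt/sqlparser".

type proxyHandler struct {
	mysql.UnimplementedHandler
	parser *sqlparser.Parser
}

// SQLParser satisfies the new interface requirement by returning the injected parser.
func (h *proxyHandler) SQLParser() *sqlparser.Parser { return h.parser }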
@@ -286,6 +294,7 @@ type ListenerConfig struct { ConnKeepAlivePeriod time.Duration FlushDelay time.Duration MySQLServerVersion string + TruncateErrLen int } // NewListenerWithConfig creates new listener using provided config. There are @@ -314,6 +323,7 @@ func NewListenerWithConfig(cfg ListenerConfig) (*Listener, error) { connBufferPooling: cfg.ConnBufferPooling, connKeepAlivePeriod: cfg.ConnKeepAlivePeriod, flushDelay: cfg.FlushDelay, + truncateErrLen: cfg.TruncateErrLen, }, nil } diff --git a/go/mysql/server_flaky_test.go b/go/mysql/server_flaky_test.go index 6b1825eba76..c7b40ddcf8e 100644 --- a/go/mysql/server_flaky_test.go +++ b/go/mysql/server_flaky_test.go @@ -32,14 +32,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" vtenv "vitess.io/vitess/go/vt/env" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tlstest" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" @@ -81,6 +81,7 @@ type testHandler struct { result *sqltypes.Result err error warnings uint16 + parser *sqlparser.Parser } func (th *testHandler) LastConn() *Conn { @@ -256,6 +257,10 @@ func (th *testHandler) WarningCount(c *Conn) uint16 { return th.warnings } +func (th *testHandler) SQLParser() *sqlparser.Parser { + return th.parser +} + func getHostPort(t *testing.T, a net.Addr) (string, int) { host := a.(*net.TCPAddr).IP.String() port := a.(*net.TCPAddr).Port @@ -263,7 +268,7 @@ func getHostPort(t *testing.T, a net.Addr) (string, int) { return host, port } -const mysqlVersion = "8.0.30-Vitess" +var mysqlVersion = fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion) func TestConnectionFromListener(t *testing.T) { th := &testHandler{} @@ -279,7 +284,7 @@ func TestConnectionFromListener(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:") require.NoError(t, err, "net.Listener failed") - l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0, 0, mysqlVersion) + l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -308,7 +313,7 @@ func TestConnectionWithoutSourceHost(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -341,7 +346,7 @@ func TestConnectionWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -374,7 +379,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go 
l.Accept() @@ -412,7 +417,7 @@ func TestConnectionUnixSocket(t *testing.T) { os.Remove(unixSocket.Name()) - l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -438,7 +443,7 @@ func TestClientFoundRows(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -487,7 +492,7 @@ func TestConnCounts(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -544,7 +549,7 @@ func TestServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -644,7 +649,7 @@ func TestServerStats(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -718,7 +723,7 @@ func TestClearTextServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -791,7 +796,7 @@ func TestDialogServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) l.AllowClearTextWithoutTLS.Store(true) defer l.Close() @@ -834,7 +839,7 @@ func TestTLSServer(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -932,7 +937,7 @@ func TestTLSRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
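Callers that go through ListenerConfig rather than the positional constructors set the new TruncateErrLen field the same way; the connection then stores the pre-truncated query on any SQLError it raises. A sketch with illustrative values; netListener, authServer and handler are assumed to already exist in the caller:

// Assumed imports: "fmt", "vitess.io/vitess/go/mysql", "vitess.io/vitess/go/mysql/config".
cfg := mysql.ListenerConfig{
	Listener:           netListener, // an already-open net.Listener owned by the caller
	AuthServer:         authServer,
	Handler:            handler,
	MySQLServerVersion: fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion),
	TruncateErrLen:     512, // queries embedded in SQL errors are cut to 512 bytes; 0 keeps them whole (assumed)
}
l, err := mysql.NewListenerWithConfig(cfg)
if err != nil {
	// handle the error
}
defer l.Close()
go l.Accept()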
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -1021,7 +1026,7 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1115,7 +1120,7 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1184,7 +1189,7 @@ func TestCachingSha2PasswordAuthWithoutTLS(t *testing.T) { defer authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1226,7 +1231,7 @@ func TestErrorCodes(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1404,7 +1409,7 @@ func TestListenerShutdown(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1475,7 +1480,7 @@ func TestServerFlush(t *testing.T) { mysqlServerFlushDelay := 10 * time.Millisecond th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, mysqlServerFlushDelay, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, mysqlServerFlushDelay, mysqlVersion, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1521,7 +1526,7 @@ func TestServerFlush(t *testing.T) { func TestTcpKeepAlive(t *testing.T) { th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() go l.Accept() diff --git a/go/mysql/sqlerror/sql_error.go b/go/mysql/sqlerror/sql_error.go index 7a71070a70c..9601026a7be 100644 --- a/go/mysql/sqlerror/sql_error.go +++ b/go/mysql/sqlerror/sql_error.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" vtrpcpb 
"vitess.io/vitess/go/vt/proto/vtrpc" @@ -62,7 +61,7 @@ func (se *SQLError) Error() string { fmt.Fprintf(&buf, " (errno %v) (sqlstate %v)", se.Num, se.State) if se.Query != "" { - fmt.Fprintf(&buf, " during query: %s", sqlparser.TruncateForLog(se.Query)) + fmt.Fprintf(&buf, " during query: %s", se.Query) } return buf.String() diff --git a/go/mysql/streaming_query.go b/go/mysql/streaming_query.go index 257c56e076f..452f1af3206 100644 --- a/go/mysql/streaming_query.go +++ b/go/mysql/streaming_query.go @@ -19,6 +19,7 @@ package mysql import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -32,7 +33,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index d3436ed8718..964dd6b5d83 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -189,7 +189,7 @@ const ( // If you add to this map, make sure you add a test case // in tabletserver/endtoend. -var mysqlToType = map[int64]querypb.Type{ +var mysqlToType = map[byte]querypb.Type{ 0: Decimal, 1: Int8, 2: Int16, @@ -275,7 +275,7 @@ func modifyType(typ querypb.Type, flags int64) querypb.Type { } // MySQLToType computes the vitess type from mysql type and flags. -func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { +func MySQLToType(mysqlType byte, flags int64) (typ querypb.Type, err error) { result, ok := mysqlToType[mysqlType] if !ok { return 0, fmt.Errorf("unsupported type: %d", mysqlType) @@ -303,7 +303,7 @@ func AreTypesEquivalent(mysqlTypeFromBinlog, mysqlTypeFromSchema querypb.Type) b // typeToMySQL is the reverse of mysqlToType. var typeToMySQL = map[querypb.Type]struct { - typ int64 + typ byte flags int64 }{ Int8: {typ: 1}, @@ -342,7 +342,7 @@ var typeToMySQL = map[querypb.Type]struct { } // TypeToMySQL returns the equivalent mysql type and flag for a vitess type. 
-func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { +func TypeToMySQL(typ querypb.Type) (mysqlType byte, flags int64) { val := typeToMySQL[typ] return val.typ, val.flags } diff --git a/go/sqltypes/type_test.go b/go/sqltypes/type_test.go index f223c5811e3..edf340b2abb 100644 --- a/go/sqltypes/type_test.go +++ b/go/sqltypes/type_test.go @@ -285,7 +285,7 @@ func TestTypeToMySQL(t *testing.T) { func TestMySQLToType(t *testing.T) { testcases := []struct { - intype int64 + intype byte inflags int64 outtype querypb.Type }{{ diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go index 79248b6d9a7..cfc4fc28088 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -253,16 +253,6 @@ func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) { os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", tabletUID))) } -// Connect returns a new connection to the underlying MySQL server -func (mysqlctl *MysqlctlProcess) Connect(ctx context.Context, username string) (*mysql.Conn, error) { - params := mysql.ConnParams{ - Uname: username, - UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", mysqlctl.TabletUID), "/mysql.sock"), - } - - return mysql.Connect(ctx, ¶ms) -} - // MysqlCtlProcessInstanceOptionalInit returns a Mysqlctl handle for mysqlctl process // configured with the given Config. func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) (*MysqlctlProcess, error) { diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index dd3cb1dbb4c..f289b4d83b2 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -127,7 +127,8 @@ deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, func parseTableName(t *testing.T, sql string) (tableName string) { // ddlStatement could possibly be composed of multiple DDL statements - tokenizer := sqlparser.NewStringTokenizer(sql) + parser := sqlparser.NewTestParser() + tokenizer := parser.NewStringTokenizer(sql) for { stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) if err != nil && errors.Is(err, io.EOF) { diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index 055dc7a1df5..5a5ecf11428 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -345,12 +345,12 @@ func ignoreAutoIncrement(t *testing.T, createTable string) string { func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, allowSchemadiffNormalization bool, hints *schemadiff.DiffHints) { // turn the "from" and "to" create statement strings (which we just read via SHOW CREATE TABLE into sqlparser.CreateTable statement) - fromStmt, err := sqlparser.ParseStrictDDL(fromCreateTable) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(fromCreateTable) require.NoError(t, err) fromCreateTableStatement, ok := fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(toCreateTable) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(toCreateTable) require.NoError(t, err) toCreateTableStatement, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) @@ 
-394,7 +394,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al // structure is identical. And so we accept that there can be a normalization issue. if allowSchemadiffNormalization { { - stmt, err := sqlparser.ParseStrictDDL(toCreateTable) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(toCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -403,7 +403,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al toCreateTable = c.Create().CanonicalStatementString() } { - stmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -418,7 +418,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al assert.Equal(t, toCreateTable, resultCreateTable, "mismatched table structure. ALTER query was: %s", diffedAlterQuery) // Also, let's see that our diff agrees there's no change: - resultStmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + resultStmt, err := sqlparser.NewTestParser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) resultCreateTableStatement, ok := resultStmt.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index dcc687ff1a6..ca43ff15970 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -195,7 +195,7 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn } } } - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Error(err) return err diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 07c12caf194..fc7d66bc732 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -428,7 +428,7 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro require.NotNil(t, res) row := res.Named().Row() tableSchema := row["Create Table"].ToString() - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := sqlparser.NewTestParser().ParseStrictDDL(tableSchema) require.NoError(t, err) createTable, ok := parsedDDL.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index ae2b9324bb6..23ad27f6750 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -810,7 +810,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } if expectHint != "" { - stmt, err := sqlparser.Parse(alterStatement) + stmt, err := sqlparser.NewTestParser().Parse(alterStatement) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) diff --git a/go/test/endtoend/vtgate/queries/random/simplifier_test.go b/go/test/endtoend/vtgate/queries/random/simplifier_test.go index 2be9ef8ab93..13f4f891c7b 100644 --- a/go/test/endtoend/vtgate/queries/random/simplifier_test.go +++ b/go/test/endtoend/vtgate/queries/random/simplifier_test.go @@ -88,13 +88,13 @@ func simplifyResultsMismatchedQuery(t *testing.T, query string) string { formal, err := vindexes.LoadFormal("svschema.json") 
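All of the test changes above follow one pattern: the package-level sqlparser.Parse / ParseStrictDDL / NewStringTokenizer helpers become methods on an injected *sqlparser.Parser, and tests build one with NewTestParser. A condensed sketch of that pattern (the DDL strings are illustrative):

// Assumed import: "vitess.io/vitess/go/vt/sqlparser".
parser := sqlparser.NewTestParser()

// Single-statement parsing moves onto the parser instance.
stmt, err := parser.ParseStrictDDL("create table t (id int primary key)")
if err != nil {
	// handle parse error
}
if _, ok := stmt.(*sqlparser.CreateTable); !ok {
	// not a CREATE TABLE
}

// Multi-statement input goes through the parser's tokenizer, exactly as the
// online DDL scheduler test above now does; ParseNextStrictDDL stays package level.
tokenizer := parser.NewStringTokenizer("create table t1 (id int); create table t2 (id int)")
for {
	ddl, err := sqlparser.ParseNextStrictDDL(tokenizer)
	if err != nil {
		break // io.EOF once the input is exhausted
	}
	_ = ddl
}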
require.NoError(t, err) - vSchema := vindexes.BuildVSchema(formal) + vSchema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) vSchemaWrapper := &vschemawrapper.VSchemaWrapper{ V: vSchema, Version: planbuilder.Gen4, } - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) simplified := simplifier.SimplifyStatement( diff --git a/go/test/fuzzing/ast_fuzzer.go b/go/test/fuzzing/ast_fuzzer.go index 118f044ea66..5951a0da9eb 100644 --- a/go/test/fuzzing/ast_fuzzer.go +++ b/go/test/fuzzing/ast_fuzzer.go @@ -36,11 +36,11 @@ func FuzzEqualsSQLNode(data []byte) int { if err != nil { return 0 } - inA, err := sqlparser.Parse(query1) + inA, err := sqlparser.NewTestParser().Parse(query1) if err != nil { return 0 } - inB, err := sqlparser.Parse(query2) + inB, err := sqlparser.NewTestParser().Parse(query2) if err != nil { return 0 } diff --git a/go/test/fuzzing/parser_fuzzer.go b/go/test/fuzzing/parser_fuzzer.go index 67b8a30ef00..04a37e6dbcb 100644 --- a/go/test/fuzzing/parser_fuzzer.go +++ b/go/test/fuzzing/parser_fuzzer.go @@ -42,7 +42,7 @@ func FuzzNormalizer(data []byte) int { } func FuzzParser(data []byte) int { - _, err := sqlparser.Parse(string(data)) + _, err := sqlparser.NewTestParser().Parse(string(data)) if err != nil { return 0 } @@ -55,7 +55,7 @@ func FuzzNodeFormat(data []byte) int { if err != nil { return 0 } - node, err := sqlparser.Parse(query) + node, err := sqlparser.NewTestParser().Parse(query) if err != nil { return 0 } @@ -69,6 +69,6 @@ func FuzzNodeFormat(data []byte) int { } func FuzzSplitStatementToPieces(data []byte) int { - _, _ = sqlparser.SplitStatementToPieces(string(data)) + _, _ = sqlparser.NewTestParser().SplitStatementToPieces(string(data)) return 1 } diff --git a/go/test/fuzzing/tabletserver_schema_fuzzer.go b/go/test/fuzzing/tabletserver_schema_fuzzer.go index 05abd6154dd..655d0fb1606 100644 --- a/go/test/fuzzing/tabletserver_schema_fuzzer.go +++ b/go/test/fuzzing/tabletserver_schema_fuzzer.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -67,7 +68,7 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl IdleTimeout: 10 * time.Second, } - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8()), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser()), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { diff --git a/go/test/fuzzing/vt_schema_fuzzer.go b/go/test/fuzzing/vt_schema_fuzzer.go index 2092eac866a..79a30d3394a 100644 --- a/go/test/fuzzing/vt_schema_fuzzer.go +++ b/go/test/fuzzing/vt_schema_fuzzer.go @@ -26,7 +26,7 @@ import ( // FuzzOnlineDDLFromCommentedStatement implements a fuzzer // that targets schema.OnlineDDLFromCommentedStatement func FuzzOnlineDDLFromCommentedStatement(data []byte) int { - stmt, err := sqlparser.Parse(string(data)) + stmt, err := sqlparser.NewTestParser().Parse(string(data)) if err != nil { return 0 } @@ -75,7 +75,7 @@ func FuzzNewOnlineDDLs(data []byte) int { return 0 } - onlineDDLs, err := schema.NewOnlineDDLs(keyspace, sql, ddlStmt, ddlStrategySetting, requestContext) + onlineDDLs, err := 
schema.NewOnlineDDLs(sql, ddlStmt, ddlStrategySetting, requestContext, keyspace) if err != nil { return 0 } diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go index 5441ec4f4a8..ee9cf8a6b4b 100644 --- a/go/test/fuzzing/vtctl_fuzzer.go +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl" @@ -181,7 +182,7 @@ func Fuzz(data []byte) int { // Add params to the command commandSlice = append(commandSlice, args...) - _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc, collations.MySQL8()), commandSlice) + _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc, collations.MySQL8(), sqlparser.NewTestParser()), commandSlice) command++ } diff --git a/go/test/vschemawrapper/vschema_wrapper.go b/go/test/vschemawrapper/vschema_wrapper.go index 7478bdb8af1..21617dcdaee 100644 --- a/go/test/vschemawrapper/vschema_wrapper.go +++ b/go/test/vschemawrapper/vschema_wrapper.go @@ -82,7 +82,7 @@ func (vw *VSchemaWrapper) PlanPrepareStatement(ctx context.Context, query string if err != nil { return nil, nil, err } - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := vw.SQLParser().Parse2(query) if err != nil { return nil, nil, err } @@ -130,6 +130,10 @@ func (vw *VSchemaWrapper) CollationEnv() *collations.Environment { return collations.MySQL8() } +func (vw *VSchemaWrapper) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + func (vw *VSchemaWrapper) PlannerWarning(_ string) { } diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index e4896048db5..bc96e690b76 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -46,6 +46,7 @@ type DBClient interface { type dbClientImpl struct { dbConfig dbconfigs.Connector dbConn *mysql.Conn + parser *sqlparser.Parser } // dbClientImplWithSidecarDBReplacement is a DBClient implementation @@ -57,14 +58,15 @@ type dbClientImplWithSidecarDBReplacement struct { } // NewDBClient creates a DBClient instance -func NewDBClient(params dbconfigs.Connector) DBClient { +func NewDBClient(params dbconfigs.Connector, parser *sqlparser.Parser) DBClient { if sidecar.GetName() != sidecar.DefaultName { return &dbClientImplWithSidecarDBReplacement{ - dbClientImpl{dbConfig: params}, + dbClientImpl{dbConfig: params, parser: parser}, } } return &dbClientImpl{ dbConfig: params, + parser: parser, } } @@ -163,7 +165,7 @@ func (dc *dbClientImpl) ExecuteFetchMulti(query string, maxrows int) ([]*sqltype func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := dcr.parser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -172,12 +174,12 @@ func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxr func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. 
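NewDBClient above now takes the parser alongside the connection parameters, and the sidecar-aware variant uses it for qualifier rewriting. A rough sketch of a call site; connector stands in for whatever dbconfigs.Connector the caller already holds, and tests simply pass sqlparser.NewTestParser():

// Assumed imports: "vitess.io/vitess/go/vt/binlog/binlogplayer", "vitess.io/vitess/go/vt/sqlparser".
dbClient := binlogplayer.NewDBClient(connector, sqlparser.NewTestParser())
if err := dbClient.Connect(); err != nil {
	// handle connection failure
}
defer dbClient.Close()
// ExecuteFetch keeps its shape; only the qualifier rewriting underneath changed.
if _, err := dbClient.ExecuteFetch("select 1", 1); err != nil {
	// handle query failure
}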
- qps, err := sqlparser.SplitStatementToPieces(query) + qps, err := dcr.parser.SplitStatementToPieces(query) if err != nil { return nil, err } for i, qp := range qps { - uq, err := sqlparser.ReplaceTableQualifiers(qp, sidecar.DefaultName, sidecar.GetName()) + uq, err := dcr.parser.ReplaceTableQualifiers(qp, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index ce07fbe9179..abc170ed493 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -42,6 +42,7 @@ type MockDBClient struct { done chan struct{} invariants map[string]*sqltypes.Result Tag string + parser *sqlparser.Parser } type mockExpect struct { @@ -84,15 +85,17 @@ func NewMockDBClient(t *testing.T) *MockDBClient { "set @@session.sql_mode": {}, "set sql_mode": {}, }, + parser: sqlparser.NewTestParser(), } } // NewMockDbaClient returns a new DBClientMock with the default "Dba" UName. func NewMockDbaClient(t *testing.T) *MockDBClient { return &MockDBClient{ - t: t, - UName: mockClientUNameDba, - done: make(chan struct{}), + t: t, + UName: mockClientUNameDba, + done: make(chan struct{}), + parser: sqlparser.NewTestParser(), } } @@ -227,7 +230,7 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re } func (dc *MockDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := dc.parser.SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/binlog/keyspace_id_resolver.go b/go/vt/binlog/keyspace_id_resolver.go index 6903ba53b71..1ca198760a3 100644 --- a/go/vt/binlog/keyspace_id_resolver.go +++ b/go/vt/binlog/keyspace_id_resolver.go @@ -17,13 +17,13 @@ limitations under the License. package binlog import ( + "context" "fmt" "strings" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -47,13 +47,13 @@ type keyspaceIDResolverFactory func(*schema.Table) (int, keyspaceIDResolver, err // newKeyspaceIDResolverFactory creates a new // keyspaceIDResolverFactory for the provided keyspace and cell. -func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { - return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell) +func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { + return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell, parser) } // newKeyspaceIDResolverFactoryV3 finds the SrvVSchema in the cell, // gets the keyspace part, and uses it to find the column name. 
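The multi-statement path above splits first and then rewrites each piece's sidecar qualifier through the injected parser. A standalone sketch of that flow; the statements and the "_vt_ci" target name are purely illustrative, while "_vt" is the default sidecar database name the code above refers to as sidecar.DefaultName:

// Assumed import: "vitess.io/vitess/go/vt/sqlparser".
parser := sqlparser.NewTestParser()
pieces, err := parser.SplitStatementToPieces(
	"select * from _vt.vreplication; select * from _vt.copy_state")
if err != nil {
	// handle split error
}
for i, piece := range pieces {
	pieces[i], err = parser.ReplaceTableQualifiers(piece, "_vt", "_vt_ci")
	if err != nil {
		// handle rewrite error
	}
}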
-func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { +func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { srvVSchema, err := ts.GetSrvVSchema(ctx, cell) if err != nil { return nil, err @@ -62,7 +62,7 @@ func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspa if !ok { return nil, fmt.Errorf("SrvVSchema has no entry for keyspace %v", keyspace) } - keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace) + keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace, parser) if err != nil { return nil, fmt.Errorf("cannot build vschema for keyspace %v: %v", keyspace, err) } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 855bd940022..4397eccd4da 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -85,6 +86,7 @@ type UpdateStreamImpl struct { state atomic.Int64 stateWaitGroup sync.WaitGroup streams StreamList + parser *sqlparser.Parser } // StreamList is a map of context.CancelFunc to mass-interrupt ongoing @@ -138,12 +140,13 @@ type RegisterUpdateStreamServiceFunc func(UpdateStream) var RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc // NewUpdateStream returns a new UpdateStreamImpl object -func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine) *UpdateStreamImpl { +func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine, parser *sqlparser.Parser) *UpdateStreamImpl { return &UpdateStreamImpl{ ts: ts, keyspace: keyspace, cell: cell, se: se, + parser: parser, } } @@ -234,7 +237,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi return callback(trans) }) bls := NewStreamer(updateStream.cp, updateStream.se, charset, pos, 0, f) - bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell) + bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell, updateStream.parser) if err != nil { return fmt.Errorf("newKeyspaceIDResolverFactory failed: %v", err) } diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index 45b1fa47803..3b28a4b9e2e 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -110,8 +110,8 @@ type OnlineDDL struct { // ParseOnlineDDLStatement parses the given SQL into a statement and returns the action type of the DDL statement, or error // if the statement is not a DDL -func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { - stmt, err := sqlparser.Parse(sql) +func ParseOnlineDDLStatement(sql string, parser *sqlparser.Parser) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { + stmt, err := parser.Parse(sql) if err != nil { return nil, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error parsing statement: SQL=%s, error=%+v", sql, err) } @@ -122,10 +122,10 @@ func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action return ddlStmt, action, 
vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query type: %s", sql) } -func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting) error { +func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, parser *sqlparser.Parser) error { // SQL statement sanity checks: if !ddlStmt.IsFullyParsed() { - if _, err := sqlparser.ParseStrictDDL(sql); err != nil { + if _, err := parser.ParseStrictDDL(sql); err != nil { // More information about the reason why the statement is not fully parsed: return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "%v", err) } @@ -147,12 +147,12 @@ func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStr } // NewOnlineDDLs takes a single DDL statement, normalizes it (potentially break down into multiple statements), and generates one or more OnlineDDL instances, one for each normalized statement -func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDLs [](*OnlineDDL), err error) { +func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDLs []*OnlineDDL, err error) { appendOnlineDDL := func(tableName string, ddlStmt sqlparser.DDLStatement) error { - if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting); err != nil { + if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting, parser); err != nil { return err } - onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID) + onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID, parser) if err != nil { return err } @@ -183,7 +183,7 @@ func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, } // NewOnlineDDL creates a schema change request with self generated UUID and RequestTime -func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDL *OnlineDDL, err error) { +func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDL *OnlineDDL, err error) { if ddlStrategySetting == nil { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "NewOnlineDDL: found nil DDLStrategySetting") } @@ -217,7 +217,7 @@ func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting sql = fmt.Sprintf("revert vitess_migration '%s'", uuid) } - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { isLegacyRevertStatement := false // query validation and rebuilding @@ -340,9 +340,9 @@ func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) { } // sqlWithoutComments returns the SQL statement without comment directives. 
Useful for tests -func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { +func (onlineDDL *OnlineDDL) sqlWithoutComments(parser *sqlparser.Parser) (sql string, err error) { sql = onlineDDL.SQL - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { // query validation and rebuilding if _, err := legacyParseRevertUUID(sql); err == nil { @@ -366,18 +366,18 @@ func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { } // GetAction extracts the DDL action type from the online DDL statement -func (onlineDDL *OnlineDDL) GetAction() (action sqlparser.DDLAction, err error) { - if _, err := onlineDDL.GetRevertUUID(); err == nil { +func (onlineDDL *OnlineDDL) GetAction(parser *sqlparser.Parser) (action sqlparser.DDLAction, err error) { + if _, err := onlineDDL.GetRevertUUID(parser); err == nil { return sqlparser.RevertDDLAction, nil } - _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL) + _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL, parser) return action, err } // IsView returns 'true' when the statement affects a VIEW -func (onlineDDL *OnlineDDL) IsView() bool { - stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL) +func (onlineDDL *OnlineDDL) IsView(parser *sqlparser.Parser) bool { + stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL, parser) if err != nil { return false } @@ -389,8 +389,8 @@ func (onlineDDL *OnlineDDL) IsView() bool { } // GetActionStr returns a string representation of the DDL action -func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionStr string, err error) { - action, err = onlineDDL.GetAction() +func (onlineDDL *OnlineDDL) GetActionStr(parser *sqlparser.Parser) (action sqlparser.DDLAction, actionStr string, err error) { + action, err = onlineDDL.GetAction(parser) if err != nil { return action, actionStr, err } @@ -410,11 +410,11 @@ func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionSt // GetRevertUUID works when this migration is a revert for another migration. It returns the UUID // fo the reverted migration. // The function returns error when this is not a revert migration. 
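From here on, every OnlineDDL helper that used to parse through package globals takes the caller's parser, as the new signatures show. A sketch of external usage; keyspace, table, DDL and migration context values are illustrative, while the strategy and function names are the ones used in the tests below:

// Assumed imports: "fmt", "vitess.io/vitess/go/vt/schema", "vitess.io/vitess/go/vt/sqlparser".
parser := sqlparser.NewTestParser()
strategy := schema.NewDDLStrategySetting(schema.DDLStrategyVitess, "")
onlineDDL, err := schema.NewOnlineDDL(
	"commerce", // keyspace
	"corder",   // table
	"alter table corder add column note varchar(100)",
	strategy,
	"ctx-1234", // migration context
	"",         // empty: let a UUID be generated
	parser,
)
if err != nil {
	// handle rejected DDL
}
// Reading attributes back also requires the parser now.
action, actionStr, err := onlineDDL.GetActionStr(parser)
fmt.Println(action, actionStr, err)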
-func (onlineDDL *OnlineDDL) GetRevertUUID() (uuid string, err error) { +func (onlineDDL *OnlineDDL) GetRevertUUID(parser *sqlparser.Parser) (uuid string, err error) { if uuid, err := legacyParseRevertUUID(onlineDDL.SQL); err == nil { return uuid, nil } - if stmt, err := sqlparser.Parse(onlineDDL.SQL); err == nil { + if stmt, err := parser.Parse(onlineDDL.SQL); err == nil { if revert, ok := stmt.(*sqlparser.RevertMigration); ok { return revert.UUID, nil } diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index c616d64a698..942b9a4274e 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -52,10 +52,11 @@ func TestIsOnlineDDLUUID(t *testing.T) { } func TestGetGCUUID(t *testing.T) { + parser := sqlparser.NewTestParser() uuids := map[string]bool{} count := 20 for i := 0; i < count; i++ { - onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "") + onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "", parser) assert.NoError(t, err) gcUUID := onlineDDL.GetGCUUID() assert.True(t, IsGCUUID(gcUUID)) @@ -86,10 +87,11 @@ func TestGetActionStr(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(parser) if ts.isError { assert.Error(t, err) } else { @@ -147,10 +149,11 @@ func TestGetRevertUUID(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - uuid, err := onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -162,10 +165,10 @@ func TestGetRevertUUID(t *testing.T) { migrationContext := "354b-11eb-82cd-f875a4d24e90" for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "", parser) assert.NoError(t, err) require.NotNil(t, onlineDDL) - uuid, err := onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -209,11 +212,12 @@ func TestNewOnlineDDL(t *testing.T) { NewDDLStrategySetting(DDLStrategyOnline, "-singleton"), } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.sql, func(t *testing.T) { for _, stgy := range strategies { t.Run(stgy.ToString(), func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "", parser) if ts.isError { assert.Error(t, err) return @@ -231,19 +235,20 @@ func TestNewOnlineDDL(t *testing.T) { t.Run("explicit UUID", func(t *testing.T) { var err error var onlineDDL *OnlineDDL + parser := sqlparser.NewTestParser() - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), 
migrationContext, "", parser) assert.NoError(t, err) assert.True(t, IsOnlineDDLUUID(onlineDDL.UUID)) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc") + _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc", parser) assert.Error(t, err) - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90") + onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.NoError(t, err) assert.Equal(t, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", onlineDDL.UUID) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90") + _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.Error(t, err) }) } @@ -284,9 +289,10 @@ func TestNewOnlineDDLs(t *testing.T) { "CREATE TABLE if not exists t (id bigint unsigned NOT NULL AUTO_INCREMENT, ts datetime(6) DEFAULT NULL, error_column NO_SUCH_TYPE NOT NULL, PRIMARY KEY (id)) ENGINE=InnoDB": {isError: true, expectErrorText: "near"}, } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for query, expect := range tests { t.Run(query, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) if expect.parseError { assert.Error(t, err) return @@ -299,7 +305,7 @@ func TestNewOnlineDDLs(t *testing.T) { } assert.True(t, ok) - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "", parser) if expect.isError { assert.Error(t, err) assert.Contains(t, err.Error(), expect.expectErrorText) @@ -309,12 +315,12 @@ func TestNewOnlineDDLs(t *testing.T) { sqls := []string{} for _, onlineDDL := range onlineDDLs { - sql, err := onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) sql = strings.ReplaceAll(sql, "\n", "") sql = strings.ReplaceAll(sql, "\t", "") sqls = append(sqls, sql) - assert.Equal(t, expect.isView, onlineDDL.IsView()) + assert.Equal(t, expect.isView, onlineDDL.IsView(parser)) } assert.Equal(t, expect.sqls, sqls) }) @@ -328,12 +334,13 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { for _, allowForeignKeys := range []bool{false, true} { testName := fmt.Sprintf("%t", allowForeignKeys) t.Run(testName, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) @@ -342,7 +349,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { if allowForeignKeys { flags = "--unsafe-allow-foreign-keys" } - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), 
migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "", parser) if allowForeignKeys { assert.NoError(t, err) } else { @@ -351,7 +358,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } for _, onlineDDL := range onlineDDLs { - sql, err := onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) assert.NotEmpty(t, sql) } @@ -373,12 +380,13 @@ func TestOnlineDDLFromCommentedStatement(t *testing.T) { } strategySetting := NewDDLStrategySetting(DDLStrategyGhost, `-singleton -declarative --max-load="Threads_running=5"`) migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { - o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "") + o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "", parser) require.NoError(t, err) - stmt, err := sqlparser.Parse(o1.SQL) + stmt, err := parser.Parse(o1.SQL) require.NoError(t, err) o2, err := OnlineDDLFromCommentedStatement(stmt) diff --git a/go/vt/schemadiff/diff.go b/go/vt/schemadiff/diff.go index fce1e5e99db..b46a7d23cc6 100644 --- a/go/vt/schemadiff/diff.go +++ b/go/vt/schemadiff/diff.go @@ -27,11 +27,11 @@ func AllSubsequent(diff EntityDiff) (diffs []EntityDiff) { // DiffCreateTablesQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. Based on this, the diff could be // nil, CreateTable, DropTable or AlterTable -func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints, parser *sqlparser.Parser) (EntityDiff, error) { var fromCreateTable *sqlparser.CreateTable var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := parser.ParseStrictDDL(query1) if err != nil { return nil, err } @@ -42,7 +42,7 @@ func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (En } var toCreateTable *sqlparser.CreateTable if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := parser.ParseStrictDDL(query2) if err != nil { return nil, err } @@ -89,11 +89,11 @@ func DiffTables(create1 *sqlparser.CreateTable, create2 *sqlparser.CreateTable, // DiffCreateViewsQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. 
Based on this, the diff could be // nil, CreateView, DropView or AlterView -func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints, parser *sqlparser.Parser) (EntityDiff, error) { var fromCreateView *sqlparser.CreateView var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := parser.ParseStrictDDL(query1) if err != nil { return nil, err } @@ -104,7 +104,7 @@ func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (Ent } var toCreateView *sqlparser.CreateView if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := parser.ParseStrictDDL(query2) if err != nil { return nil, err } @@ -151,12 +151,12 @@ func DiffViews(create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hin // DiffSchemasSQL compares two schemas and returns the rich diff that turns // 1st schema into 2nd. Schemas are build from SQL, each of which can contain an arbitrary number of // CREATE TABLE and CREATE VIEW statements. -func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, error) { - schema1, err := NewSchemaFromSQL(sql1) +func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints, parser *sqlparser.Parser) (*SchemaDiff, error) { + schema1, err := NewSchemaFromSQL(sql1, parser) if err != nil { return nil, err } - schema2, err := NewSchemaFromSQL(sql2) + schema2, err := NewSchemaFromSQL(sql2, parser) if err != nil { return nil, err } diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go index d2a170f4752..231cb4a352b 100644 --- a/go/vt/schemadiff/diff_test.go +++ b/go/vt/schemadiff/diff_test.go @@ -190,6 +190,7 @@ func TestDiffTables(t *testing.T) { }, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateTable *sqlparser.CreateTable @@ -198,7 +199,7 @@ func TestDiffTables(t *testing.T) { hints = ts.hints } if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := parser.ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateTable, ok = fromStmt.(*sqlparser.CreateTable) @@ -206,7 +207,7 @@ func TestDiffTables(t *testing.T) { } var toCreateTable *sqlparser.CreateTable if ts.to != "" { - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := parser.ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateTable, ok = toStmt.(*sqlparser.CreateTable) @@ -218,7 +219,7 @@ func TestDiffTables(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. 
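The schemadiff entry points follow the same injection pattern, so diffing two CREATE statements from outside the package now looks roughly like the sketch below. The table definitions are illustrative, and the EntityDiff accessors named here (IsEmpty, CanonicalStatementString) are the ones this package already uses elsewhere in this section; treat the exact output as indicative only:

// Assumed imports: "fmt", "vitess.io/vitess/go/vt/schemadiff", "vitess.io/vitess/go/vt/sqlparser".
parser := sqlparser.NewTestParser()
hints := &schemadiff.DiffHints{}
diff, err := schemadiff.DiffCreateTablesQueries(
	"create table t (id int primary key)",
	"create table t (id int primary key, v varchar(64))",
	hints, parser)
if err != nil {
	// handle invalid input
}
if diff != nil && !diff.IsEmpty() {
	fmt.Println(diff.CanonicalStatementString()) // e.g. an ALTER TABLE adding column v
}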
- dq, dqerr := DiffCreateTablesQueries(ts.from, ts.to, hints) + dq, dqerr := DiffCreateTablesQueries(ts.from, ts.to, hints, sqlparser.NewTestParser()) d, err := DiffTables(fromCreateTable, toCreateTable, hints) switch { case ts.isError: @@ -241,7 +242,7 @@ func TestDiffTables(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = parser.ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -260,7 +261,7 @@ func TestDiffTables(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = parser.ParseStrictDDL(canonicalDiff) assert.NoError(t, err) } // let's also check dq, and also validate that dq's statement is identical to d's @@ -322,11 +323,12 @@ func TestDiffViews(t *testing.T) { }, } hints := &DiffHints{} + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateView *sqlparser.CreateView if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := parser.ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateView, ok = fromStmt.(*sqlparser.CreateView) @@ -334,7 +336,7 @@ func TestDiffViews(t *testing.T) { } var toCreateView *sqlparser.CreateView if ts.to != "" { - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := parser.ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateView, ok = toStmt.(*sqlparser.CreateView) @@ -346,7 +348,7 @@ func TestDiffViews(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. 
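Building a whole schema follows suit: NewSchemaFromQueries and NewSchemaFromSQL, changed just below, take the parser explicitly. A sketch with illustrative statements; ToSQL is the same accessor the tests in this section rely on:

// Assumed imports: "fmt", "vitess.io/vitess/go/vt/schemadiff", "vitess.io/vitess/go/vt/sqlparser".
parser := sqlparser.NewTestParser()
s, err := schemadiff.NewSchemaFromQueries([]string{
	"create table t1 (id int primary key)",
	"create view v1 as select id from t1",
}, parser)
if err != nil {
	// handle duplicate or unresolved entities
}
fmt.Println(s.ToSQL()) // normalized CREATE statements, views ordered after their tables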
- dq, dqerr := DiffCreateViewsQueries(ts.from, ts.to, hints) + dq, dqerr := DiffCreateViewsQueries(ts.from, ts.to, hints, parser) d, err := DiffViews(fromCreateView, toCreateView, hints) switch { case ts.isError: @@ -369,7 +371,7 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = parser.ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -388,7 +390,7 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = parser.ParseStrictDDL(canonicalDiff) assert.NoError(t, err) } @@ -796,12 +798,13 @@ func TestDiffSchemas(t *testing.T) { }, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { hints := &DiffHints{ TableRenameStrategy: ts.tableRename, } - diff, err := DiffSchemasSQL(ts.from, ts.to, hints) + diff, err := DiffSchemasSQL(ts.from, ts.to, hints, parser) if ts.expectError != "" { require.Error(t, err) assert.Contains(t, err.Error(), ts.expectError) @@ -827,21 +830,21 @@ func TestDiffSchemas(t *testing.T) { // validate we can parse back the diff statements for _, s := range statements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := parser.ParseStrictDDL(s) assert.NoError(t, err) } for _, s := range cstatements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := parser.ParseStrictDDL(s) assert.NoError(t, err) } { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(ts.from, parser) require.NoError(t, err) schema1SQL := schema1.ToSQL() - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(ts.to, parser) require.NoError(t, err) applied, err := schema1.Apply(diffs) require.NoError(t, err) @@ -892,12 +895,13 @@ func TestSchemaApplyError(t *testing.T) { }, } hints := &DiffHints{} + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(ts.from, parser) assert.NoError(t, err) - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(ts.to, parser) assert.NoError(t, err) { diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go index 61b06236bb4..5e776dbd3b3 100644 --- a/go/vt/schemadiff/schema.go +++ b/go/vt/schemadiff/schema.go @@ -98,10 +98,10 @@ func NewSchemaFromStatements(statements []sqlparser.Statement) (*Schema, error) } // NewSchemaFromQueries creates a valid and normalized schema based on list of queries -func NewSchemaFromQueries(queries []string) (*Schema, error) { +func NewSchemaFromQueries(queries []string, parser *sqlparser.Parser) (*Schema, error) { statements := make([]sqlparser.Statement, 0, len(queries)) for _, q := range queries { - stmt, err := sqlparser.ParseStrictDDL(q) + stmt, err := parser.ParseStrictDDL(q) if err != nil { return nil, err } @@ -112,9 +112,9 @@ func NewSchemaFromQueries(queries []string) (*Schema, error) { // NewSchemaFromSQL creates a valid and normalized schema based on a SQL blob that contains // CREATE statements for various objects (tables, views) -func NewSchemaFromSQL(sql string) (*Schema, error) { +func NewSchemaFromSQL(sql string, parser *sqlparser.Parser) (*Schema, error) { var statements []sqlparser.Statement - tokenizer := 
sqlparser.NewStringTokenizer(sql) + tokenizer := parser.NewStringTokenizer(sql) for { stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) if err != nil { diff --git a/go/vt/schemadiff/schema_diff_test.go b/go/vt/schemadiff/schema_diff_test.go index df7d893356f..c41ee8e7839 100644 --- a/go/vt/schemadiff/schema_diff_test.go +++ b/go/vt/schemadiff/schema_diff_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestPermutations(t *testing.T) { @@ -163,11 +165,11 @@ func TestPermutations(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + fromSchema, err := NewSchemaFromQueries(tc.fromQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(tc.toQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, toSchema) @@ -842,11 +844,11 @@ func TestSchemaDiff(t *testing.T) { if tc.fromQueries == nil { tc.fromQueries = createQueries } - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + fromSchema, err := NewSchemaFromQueries(tc.fromQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(tc.toQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, toSchema) diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index a1b55544593..05bf6c12df6 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -84,7 +84,7 @@ var schemaTestExpectSortedViewNames = []string{ var schemaTestToSQL = "CREATE TABLE `t1` (\n\t`id` int\n);\nCREATE TABLE `t2` (\n\t`id` int\n);\nCREATE TABLE `t3` (\n\t`id` int,\n\t`type` enum('foo', 'bar') NOT NULL DEFAULT 'foo'\n);\nCREATE TABLE `t5` (\n\t`id` int\n);\nCREATE VIEW `v0` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v3` AS SELECT *, `id` + 1 AS `id_plus`, `id` + 2 FROM `t3` AS `t3`;\nCREATE VIEW `v9` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v1` AS SELECT * FROM `v3`;\nCREATE VIEW `v2` AS SELECT * FROM `v3`, `t2`;\nCREATE VIEW `v4` AS SELECT * FROM `t2` AS `something_else`, `v3`;\nCREATE VIEW `v5` AS SELECT * FROM `t1`, (SELECT * FROM `v3`) AS `some_alias`;\nCREATE VIEW `v6` AS SELECT * FROM `v4`;\n" func TestNewSchemaFromQueries(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -94,7 +94,7 @@ func TestNewSchemaFromQueries(t *testing.T) { } func TestNewSchemaFromSQL(t *testing.T) { - schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";")) + schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";"), sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -108,7 +108,7 @@ func TestNewSchemaFromQueriesWithDuplicate(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v2 as select * from v1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ApplyDuplicateEntityError{Entity: "v2"}).Error()) } @@ -118,7 +118,7 @@ func TestNewSchemaFromQueriesUnresolved(t *testing.T) { queries := 
append(schemaTestCreateQueries, "create view v7 as select * from v8, t2", ) - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) v := schema.sorted[len(schema.sorted)-1] @@ -131,7 +131,7 @@ func TestNewSchemaFromQueriesUnresolvedAlias(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v7 as select * from something_else as t1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } @@ -141,7 +141,7 @@ func TestNewSchemaFromQueriesViewFromDual(t *testing.T) { queries := []string{ "create view v20 as select 1 from dual", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.NoError(t, err) } @@ -150,7 +150,7 @@ func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) { queries := []string{ "create view v20 as select 1", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.NoError(t, err) } @@ -160,14 +160,14 @@ func TestNewSchemaFromQueriesLoop(t *testing.T) { "create view v7 as select * from v8, t2", "create view v8 as select * from t1, v7", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) require.Error(t, err) err = vterrors.UnwrapFirst(err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } func TestToSQL(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -176,7 +176,7 @@ func TestToSQL(t *testing.T) { } func TestCopy(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -223,7 +223,7 @@ func TestGetViewDependentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.view, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.view) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.view) require.NoError(t, err) createView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) @@ -263,7 +263,7 @@ func TestGetForeignKeyParentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.table, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.table) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.table) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -299,7 +299,7 @@ func TestTableForeignKeyOrdering(t *testing.T) { "v13", "v09", } - schema, err := NewSchemaFromQueries(fkQueries) + schema, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) require.NoError(t, err) assert.NotNil(t, schema) @@ -407,7 +407,7 @@ func TestInvalidSchema(t *testing.T) { for _, ts := range tt { t.Run(ts.schema, func(t *testing.T) { - _, err := NewSchemaFromSQL(ts.schema) + _, err := NewSchemaFromSQL(ts.schema, sqlparser.NewTestParser()) if ts.expectErr == nil { assert.NoError(t, err) } else { @@ -425,7 +425,7 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { "create table t11 (id int 
primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t15(id int, primary key(id))", } - s, err := NewSchemaFromQueries(fkQueries) + s, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) assert.Error(t, err) // Even though there's errors, we still expect the schema to have been created. assert.NotNil(t, s) @@ -443,7 +443,7 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)", } - _, err := NewSchemaFromQueries(fkQueries) + _, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) assert.Error(t, err) assert.ErrorContains(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error()) assert.ErrorContains(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t12"}).Error()) @@ -468,7 +468,7 @@ func TestGetEntityColumnNames(t *testing.T) { "create view vb as select *, now() from v8", } - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, schema) @@ -746,7 +746,7 @@ func TestViewReferences(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - schema, err := NewSchemaFromQueries(ts.queries) + schema, err := NewSchemaFromQueries(ts.queries, sqlparser.NewTestParser()) if ts.expectErr == nil { require.NoError(t, err) require.NotNil(t, schema) @@ -838,9 +838,9 @@ func TestMassiveSchema(t *testing.T) { queries1 = append(queries1, query) tableNames[tableName] = true } - schema0, err = NewSchemaFromQueries(queries0) + schema0, err = NewSchemaFromQueries(queries0, sqlparser.NewTestParser()) require.NoError(t, err) - schema1, err = NewSchemaFromQueries(queries1) + schema1, err = NewSchemaFromQueries(queries1, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, countModifiedTables, modifyTables) diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index e2ef58c1a6f..5e159ffca99 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -1267,12 +1267,12 @@ func TestCreateTableDiff(t *testing.T) { standardHints := DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) require.NoError(t, err) toCreateTable, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -1332,7 +1332,7 @@ func TestCreateTableDiff(t *testing.T) { } } // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(diff) assert.NoError(t, err) // Validate "from/to" entities @@ -1362,7 +1362,7 @@ func TestCreateTableDiff(t *testing.T) { { cdiff := alter.CanonicalStatementString() assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(cdiff) assert.NoError(t, err) } @@ -1859,12 +1859,12 @@ func TestValidate(t *testing.T) { hints := DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := 
sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(ts.alter) + stmt, err = sqlparser.NewTestParser().ParseStrictDDL(ts.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -1888,7 +1888,7 @@ func TestValidate(t *testing.T) { require.True(t, ok) applied = c.normalize() - stmt, err := sqlparser.ParseStrictDDL(ts.to) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) require.NoError(t, err) toCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -2172,7 +2172,7 @@ func TestNormalize(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -2261,7 +2261,7 @@ func TestIndexesCoveringForeignKeyColumns(t *testing.T) { }, } - stmt, err := sqlparser.ParseStrictDDL(sql) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(sql) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/vt/schemadiff/view_test.go b/go/vt/schemadiff/view_test.go index 939308d056c..d32739d7190 100644 --- a/go/vt/schemadiff/view_test.go +++ b/go/vt/schemadiff/view_test.go @@ -148,12 +148,12 @@ func TestCreateViewDiff(t *testing.T) { hints := &DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) assert.NoError(t, err) fromCreateView, ok := fromStmt.(*sqlparser.CreateView) assert.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) assert.NoError(t, err) toCreateView, ok := toStmt.(*sqlparser.CreateView) assert.True(t, ok) @@ -177,7 +177,7 @@ func TestCreateViewDiff(t *testing.T) { diff := alter.StatementString() assert.Equal(t, ts.diff, diff) // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := alter.Entities() @@ -199,7 +199,7 @@ func TestCreateViewDiff(t *testing.T) { { cdiff := alter.CanonicalStatementString() assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(cdiff) assert.NoError(t, err) } } @@ -241,7 +241,7 @@ func TestNormalizeView(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 154d985bba4..b4724241cd1 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" @@ -94,7 +96,7 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( 
[]string{"create table test_table (pk int);"}, false, false, false) controller.SetKeyspace("unknown_keyspace") - executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() _, err := Run(ctx, controller, executor) @@ -125,7 +127,7 @@ func TestSchemaManagerRun(t *testing.T) { }) fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) - executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) @@ -176,7 +178,7 @@ func TestSchemaManagerExecutorFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) @@ -196,7 +198,7 @@ func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("online") ctx := context.Background() @@ -212,7 +214,7 @@ func TestSchemaManagerExecutorBatchVsQueriesFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") ctx := context.Background() @@ -228,7 +230,7 @@ func TestSchemaManagerExecutorBatchVsUUIDsFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") executor.SetUUIDList([]string{"4e5dcf80_354b_11eb_82cd_f875a4d24e90"}) @@ -271,7 +273,7 @@ func 
TestSchemaManagerRegisterControllerFactory(t *testing.T) { } func newFakeExecutor(t *testing.T) *TabletExecutor { - return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) } func newFakeTabletManagerClient() *fakeTabletManagerClient { diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index a56a95d5034..cd1691dd01e 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -53,10 +53,11 @@ type TabletExecutor struct { ddlStrategySetting *schema.DDLStrategySetting uuids []string batchSize int64 + parser *sqlparser.Parser } // NewTabletExecutor creates a new TabletExecutor instance -func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64) *TabletExecutor { +func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64, parser *sqlparser.Parser) *TabletExecutor { return &TabletExecutor{ ts: ts, tmc: tmc, @@ -65,6 +66,7 @@ func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.Ta waitReplicasTimeout: waitReplicasTimeout, migrationContext: migrationContext, batchSize: batchSize, + parser: parser, } } @@ -146,7 +148,7 @@ func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { func (exec *TabletExecutor) parseDDLs(sqls []string) error { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -204,14 +206,14 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided return executeViaFetch() } // Analyze what type of query this is: - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return false, err } switch stmt := stmt.(type) { case sqlparser.DDLStatement: if exec.isOnlineSchemaDDL(stmt) { - onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, exec.ddlStrategySetting, exec.migrationContext, providedUUID) + onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, exec.ddlStrategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -227,7 +229,7 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided } case *sqlparser.RevertMigration: strategySetting := schema.NewDDLStrategySetting(schema.DDLStrategyOnline, exec.ddlStrategySetting.Options) - onlineDDL, err := schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID) + onlineDDL, err := schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -265,9 +267,9 @@ func batchSQLs(sqls []string, batchSize int) (batchedSQLs []string) { // allSQLsAreCreateQueries returns 'true' when all given queries are CREATE TABLE|VIEW // This function runs pretty fast even for thousands of tables 
(its overhead is insignificant compared with // the time it would take to apply the changes). -func allSQLsAreCreateQueries(sqls []string) (bool, error) { +func allSQLsAreCreateQueries(sqls []string, parser *sqlparser.Parser) (bool, error) { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -377,7 +379,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute if exec.hasProvidedUUIDs() { return errorExecResult(fmt.Errorf("--batch-size conflicts with --uuid-list. Batching does not support UUIDs.")) } - allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls) + allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls, exec.parser) if err != nil { return errorExecResult(err) } @@ -444,16 +446,16 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult // applyAllowZeroInDate takes a SQL string which may contain one or more statements, // and, assuming those are DDLs, adds a /*vt+ allowZeroInDate=true */ directive to all of them, // returning the result again as one long SQL. -func applyAllowZeroInDate(sql string) (string, error) { +func applyAllowZeroInDate(sql string, parser *sqlparser.Parser) (string, error) { // sql may be a batch of multiple statements - sqls, err := sqlparser.SplitStatementToPieces(sql) + sqls, err := parser.SplitStatementToPieces(sql) if err != nil { return sql, err } var modifiedSqls []string for _, singleSQL := range sqls { // --allow-zero-in-date Applies to DDLs - stmt, err := sqlparser.Parse(singleSQL) + stmt, err := parser.Parse(singleSQL) if err != nil { return sql, err } @@ -486,7 +488,7 @@ func (exec *TabletExecutor) executeOneTablet( } else { if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowZeroInDateFlag() { // --allow-zero-in-date Applies to DDLs - sql, err = applyAllowZeroInDate(sql) + sql, err = applyAllowZeroInDate(sql, exec.parser) if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} return diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 175e10dfb66..0ae960e6e9c 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -72,7 +72,7 @@ func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { if err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { t.Fatalf("InitTablet failed: %v", err) } - executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) if err := executor.Open(ctx, "test_keyspace"); err == nil || !strings.Contains(err.Error(), "does not have a primary") { t.Fatalf("executor.Open() = '%v', want error", err) } @@ -105,7 +105,7 @@ func TestTabletExecutorValidate(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, 
sqlparser.NewTestParser()) ctx := context.Background() sqls := []string{ @@ -179,7 +179,7 @@ func TestTabletExecutorDML(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() executor.Open(ctx, "unsharded_keyspace") @@ -269,12 +269,13 @@ func TestIsOnlineSchemaDDL(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { e := &TabletExecutor{} err := e.SetDDLStrategy(ts.ddlStrategy) assert.NoError(t, err) - stmt, err := sqlparser.Parse(ts.query) + stmt, err := parser.Parse(ts.query) assert.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) @@ -402,7 +403,7 @@ func TestAllSQLsAreCreateQueries(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - result, err := allSQLsAreCreateQueries(tcase.sqls) + result, err := allSQLsAreCreateQueries(tcase.sqls, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) @@ -437,7 +438,7 @@ func TestApplyAllowZeroInDate(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - result, err := applyAllowZeroInDate(tcase.sql) + result, err := applyAllowZeroInDate(tcase.sql, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) diff --git a/go/vt/servenv/mysql.go b/go/vt/servenv/mysql.go index c8a46cb2da6..6a9b48e495b 100644 --- a/go/vt/servenv/mysql.go +++ b/go/vt/servenv/mysql.go @@ -17,13 +17,17 @@ limitations under the License. package servenv import ( + "fmt" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/mysql/config" ) // mySQLServerVersion is what Vitess will present as it's version during the connection handshake, // and as the value to the @@version system variable. If nothing is provided, Vitess will report itself as // a specific MySQL version with the vitess version appended to it -var mySQLServerVersion = "8.0.30-Vitess" +var mySQLServerVersion = fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion) // RegisterMySQLServerFlags installs the flags needed to specify or expose a // particular MySQL server version from Vitess. diff --git a/go/vt/servenv/truncate_query.go b/go/vt/servenv/truncate_query.go new file mode 100644 index 00000000000..fdb618c5c6a --- /dev/null +++ b/go/vt/servenv/truncate_query.go @@ -0,0 +1,34 @@ +package servenv + +import ( + "github.com/spf13/pflag" +) + +var ( + // TruncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. + TruncateUILen = 512 + + // TruncateErrLen truncate queries in error logs to the given length. 0 means unlimited. 
+ TruncateErrLen = 0 +) + +func registerQueryTruncationFlags(fs *pflag.FlagSet) { + fs.IntVar(&TruncateUILen, "sql-max-length-ui", TruncateUILen, "truncate queries in debug UIs to the given length (default 512)") + fs.IntVar(&TruncateErrLen, "sql-max-length-errors", TruncateErrLen, "truncate queries in error logs to the given length (default unlimited)") +} + +func init() { + for _, cmd := range []string{ + "vtgate", + "vttablet", + "vtcombo", + "vtctld", + "vtctl", + "vtexplain", + "vtbackup", + "vttestserver", + "vtbench", + } { + OnParseFor(cmd, registerQueryTruncationFlags) + } +} diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index 4b8c37039d7..4f3ea2e8252 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -114,8 +114,8 @@ func init() { })) } -func validateSchemaDefinition(name, schema string) (string, error) { - stmt, err := sqlparser.ParseStrictDDL(schema) +func validateSchemaDefinition(name, schema string, parser *sqlparser.Parser) (string, error) { + stmt, err := parser.ParseStrictDDL(schema) if err != nil { return "", err @@ -143,7 +143,7 @@ func validateSchemaDefinition(name, schema string) (string, error) { // loadSchemaDefinitions loads the embedded schema definitions // into a slice of sidecarTables for processing. -func loadSchemaDefinitions() { +func loadSchemaDefinitions(parser *sqlparser.Parser) { sqlFileExtension := ".sql" err := fs.WalkDir(schemaLocation, ".", func(path string, entry fs.DirEntry, err error) error { if err != nil { @@ -172,7 +172,7 @@ func loadSchemaDefinitions() { panic(err) } var normalizedSchema string - if normalizedSchema, err = validateSchemaDefinition(name, string(schema)); err != nil { + if normalizedSchema, err = validateSchemaDefinition(name, string(schema), parser); err != nil { return err } sidecarTables = append(sidecarTables, &sidecarTable{name: name, module: module, path: path, schema: normalizedSchema}) @@ -197,6 +197,7 @@ type schemaInit struct { ctx context.Context exec Exec dbCreated bool // The first upgrade/create query will also create the sidecar database if required. + parser *sqlparser.Parser } // Exec is a callback that has to be passed to Init() to @@ -228,15 +229,18 @@ func getDDLErrorHistory() []*ddlError { // Init creates or upgrades the sidecar database based on // the declarative schema defined for all tables. -func Init(ctx context.Context, exec Exec) error { +func Init(ctx context.Context, exec Exec, parser *sqlparser.Parser) error { printCallerDetails() // for debug purposes only, remove in v17 log.Infof("Starting sidecardb.Init()") - once.Do(loadSchemaDefinitions) + once.Do(func() { + loadSchemaDefinitions(parser) + }) si := &schemaInit{ - ctx: ctx, - exec: exec, + ctx: ctx, + exec: exec, + parser: parser, } // There are paths in the tablet initialization where we @@ -371,7 +375,7 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s TableCharsetCollateStrategy: schemadiff.TableCharsetCollateIgnoreAlways, AlterTableAlgorithmStrategy: schemadiff.AlterTableAlgorithmStrategyCopy, } - diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints) + diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints, si.parser) if err != nil { return "", err } @@ -459,8 +463,10 @@ func (t *sidecarTable) String() string { // AddSchemaInitQueries adds sidecar database schema related // queries to a mock db. // This is for unit tests only! 
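As a condensed sketch of how a unit test now wires the injected parser through both helpers (t, ctx, and the exec callback are assumed to be set up as in the surrounding sidecardb tests):

	parser := sqlparser.NewTestParser()
	db := fakesqldb.New(t)
	defer db.Close()
	AddSchemaInitQueries(db, false, parser) // register the expected schema-init queries
	err := Init(ctx, exec, parser)          // create/upgrade the sidecar db with the same parser
	require.NoError(t, err)
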
-func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { - once.Do(loadSchemaDefinitions) +func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool, parser *sqlparser.Parser) { + once.Do(func() { + loadSchemaDefinitions(parser) + }) result := &sqltypes.Result{} for _, q := range sidecar.DBInitQueryPatterns { db.AddQueryPattern(q, result) diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go index feda4966418..1565e0cb754 100644 --- a/go/vt/sidecardb/sidecardb_test.go +++ b/go/vt/sidecardb/sidecardb_test.go @@ -42,7 +42,8 @@ func TestInitErrors(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + parser := sqlparser.NewTestParser() + AddSchemaInitQueries(db, false, parser) ddlErrorCount.Set(0) ddlCount.Set(0) @@ -70,7 +71,7 @@ func TestInitErrors(t *testing.T) { } // simulate errors for the table creation DDLs applied for tables specified in schemaErrors - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) if err != nil { return nil, err } @@ -86,7 +87,7 @@ func TestInitErrors(t *testing.T) { } require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), getDDLCount()) require.Equal(t, int64(len(schemaErrors)), getDDLErrorCount()) @@ -125,7 +126,8 @@ func TestMiscSidecarDB(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + parser := sqlparser.NewTestParser() + AddSchemaInitQueries(db, false, parser) db.AddQuery("use dbname", &sqltypes.Result{}) db.AddQueryPattern("set @@session.sql_mode=.*", &sqltypes.Result{}) @@ -150,29 +152,30 @@ func TestMiscSidecarDB(t *testing.T) { require.NoError(t, err) db.AddQuery(dbeq, result) db.AddQuery(sidecar.GetCreateQuery(), &sqltypes.Result{}) - AddSchemaInitQueries(db, false) + AddSchemaInitQueries(db, false, parser) // tests init on empty db ddlErrorCount.Set(0) ddlCount.Set(0) require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // Include the table DDLs in the expected queries. // This causes them to NOT be created again. 
- AddSchemaInitQueries(db, true) + AddSchemaInitQueries(db, true, parser) // tests init on already inited db - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // tests misc paths not covered above si := &schemaInit{ - ctx: ctx, - exec: exec, + ctx: ctx, + exec: exec, + parser: parser, } err = si.setCurrentDatabase(sidecar.GetIdentifier()) @@ -197,9 +200,10 @@ func TestValidateSchema(t *testing.T) { {"invalid table name", "t1", "create table if not exists t2(i int)", true}, {"qualifier", "t1", "create table if not exists vt_product.t1(i int)", true}, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { - _, err := validateSchemaDefinition(tc.name, tc.schema) + _, err := validateSchemaDefinition(tc.name, tc.schema, parser) if tc.mustError { require.Error(t, err) } else { @@ -221,13 +225,15 @@ func TestAlterTableAlgorithm(t *testing.T) { {"add column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t1(i int, i1 int)"}, {"modify column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t(i float)"}, } - si := &schemaInit{} + si := &schemaInit{ + parser: sqlparser.NewTestParser(), + } copyAlgo := sqlparser.AlgorithmValue("COPY") for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { diff, err := si.findTableSchemaDiff(tc.tableName, tc.currentSchema, tc.desiredSchema) require.NoError(t, err) - stmt, err := sqlparser.Parse(diff) + stmt, err := si.parser.Parse(diff) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index b4015f7937b..ea0773d99cc 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -344,8 +344,8 @@ func IsDMLStatement(stmt Statement) bool { // TableFromStatement returns the qualified table name for the query. // This works only for select statements. 
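A minimal sketch of the method form introduced below, written as in-package test code (the query is illustrative and the printed output assumes standard TableName formatting; t comes from the enclosing test):

	parser := NewTestParser()
	name, err := parser.TableFromStatement("select 1 from ks.t1")
	require.NoError(t, err)
	fmt.Println(String(name)) // ks.t1
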
-func TableFromStatement(sql string) (TableName, error) { - stmt, err := Parse(sql) +func (p *Parser) TableFromStatement(sql string) (TableName, error) { + stmt, err := p.Parse(sql) if err != nil { return TableName{}, err } diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index 9f6a451770e..0a2de52ef19 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -145,8 +145,9 @@ func TestSplitAndExpression(t *testing.T) { sql: "select * from t where (a = 1 and ((b = 1 and c = 1)))", out: []string{"a = 1", "b = 1", "c = 1"}, }} + parser := NewTestParser() for _, tcase := range testcases { - stmt, err := Parse(tcase.sql) + stmt, err := parser.Parse(tcase.sql) assert.NoError(t, err) var expr Expr if where := stmt.(*Select).Where; where != nil { @@ -259,9 +260,9 @@ func TestTableFromStatement(t *testing.T) { in: "bad query", out: "syntax error at position 4 near 'bad'", }} - + parser := NewTestParser() for _, tc := range testcases { - name, err := TableFromStatement(tc.in) + name, err := parser.TableFromStatement(tc.in) var got string if err != nil { got = err.Error() @@ -288,8 +289,9 @@ func TestGetTableName(t *testing.T) { out: "", }} + parser := NewTestParser() for _, tc := range testcases { - tree, err := Parse(tc.in) + tree, err := parser.Parse(tc.in) if err != nil { t.Error(err) continue diff --git a/go/vt/sqlparser/ast_copy_on_rewrite_test.go b/go/vt/sqlparser/ast_copy_on_rewrite_test.go index 389b2a4bc29..bb2bd5b886e 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite_test.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite_test.go @@ -24,8 +24,9 @@ import ( ) func TestCopyOnRewrite(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { col, ok := cursor.Node().(*ColName) @@ -42,9 +43,10 @@ func TestCopyOnRewrite(t *testing.T) { } func TestCopyOnRewriteDeeper(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original. 
the changed happens deep in the syntax tree, // here we are testing that all ancestors up to the root are cloned correctly - expr, err := ParseExpr("a + b * c = 12") + expr, err := parser.ParseExpr("a + b * c = 12") require.NoError(t, err) var path []string out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { @@ -72,8 +74,9 @@ func TestCopyOnRewriteDeeper(t *testing.T) { } func TestDontCopyWithoutRewrite(t *testing.T) { + parser := NewTestParser() // when no rewriting happens, we want the original back - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {}, nil) @@ -81,9 +84,10 @@ func TestDontCopyWithoutRewrite(t *testing.T) { } func TestStopTreeWalk(t *testing.T) { + parser := NewTestParser() // stop walking down part of the AST original := "a = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) @@ -102,9 +106,10 @@ func TestStopTreeWalk(t *testing.T) { } func TestStopTreeWalkButStillVisit(t *testing.T) { + parser := NewTestParser() // here we are asserting that even when we stop at the binary expression, we still visit it in the post visitor original := "1337 = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 86bab314dd8..3ad9a5298c4 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -335,11 +335,11 @@ func TestRewrites(in *testing.T) { socket: true, queryTimeout: true, }} - + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST( @@ -353,7 +353,7 @@ func TestRewrites(in *testing.T) { ) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) s := String(expected) @@ -392,7 +392,8 @@ func (*fakeViews) FindView(name TableName) SelectStatement { if name.Name.String() != "user_details" { return nil } - statement, err := Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") + parser := NewTestParser() + statement, err := parser.Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") if err != nil { return nil } @@ -434,16 +435,17 @@ func TestRewritesWithSetVarComment(in *testing.T) { setVarComment: "AA(a)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -482,16 +484,17 @@ func TestRewritesSysVar(in *testing.T) { expected: "select 
:__vttransaction_isolation as `@@session.transaction_isolation` from dual", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -532,16 +535,17 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) { expected: "SELECT 2 as `(select 2 from dual)` from DUAL", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 97b93a80379..b1181e83db1 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -30,8 +30,9 @@ import ( ) func TestAppend(t *testing.T) { + parser := NewTestParser() query := "select * from t where a = 1" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) var b strings.Builder Append(&b, tree) @@ -49,9 +50,10 @@ func TestAppend(t *testing.T) { } func TestSelect(t *testing.T) { - e1, err := ParseExpr("a = 1") + parser := NewTestParser() + e1, err := parser.ParseExpr("a = 1") require.NoError(t, err) - e2, err := ParseExpr("b = 2") + e2, err := parser.ParseExpr("b = 2") require.NoError(t, err) t.Run("single predicate where", func(t *testing.T) { sel := &Select{} @@ -81,7 +83,8 @@ func TestSelect(t *testing.T) { } func TestUpdate(t *testing.T) { - tree, err := Parse("update t set a = 1") + parser := NewTestParser() + tree, err := parser.Parse("update t set a = 1") require.NoError(t, err) upd, ok := tree.(*Update) @@ -103,11 +106,12 @@ func TestUpdate(t *testing.T) { } func TestRemoveHints(t *testing.T) { + parser := NewTestParser() for _, query := range []string{ "select * from t use index (i)", "select * from t force index (i)", } { - tree, err := Parse(query) + tree, err := parser.Parse(query) if err != nil { t.Fatal(err) } @@ -124,16 +128,17 @@ func TestRemoveHints(t *testing.T) { } func TestAddOrder(t *testing.T) { - src, err := Parse("select foo, bar from baz order by foo") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz order by foo") require.NoError(t, err) order := src.(*Select).OrderBy[0] - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).AddOrder(order) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t order by foo asc", buf.String()) - dst, err = Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).AddOrder(order) buf = NewTrackedBuffer(nil) @@ -142,16 +147,17 @@ func TestAddOrder(t *testing.T) { } func TestSetLimit(t *testing.T) { - src, err := Parse("select foo, bar from baz limit 
4") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz limit 4") require.NoError(t, err) limit := src.(*Select).Limit - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).SetLimit(limit) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t limit 4", buf.String()) - dst, err = Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).SetLimit(limit) buf = NewTrackedBuffer(nil) @@ -213,8 +219,9 @@ func TestDDL(t *testing.T) { }, affected: []string{"a", "b"}, }} + parser := NewTestParser() for _, tcase := range testcases { - got, err := Parse(tcase.query) + got, err := parser.Parse(tcase.query) if err != nil { t.Fatal(err) } @@ -232,7 +239,8 @@ func TestDDL(t *testing.T) { } func TestSetAutocommitON(t *testing.T) { - stmt, err := Parse("SET autocommit=ON") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=ON") require.NoError(t, err) s, ok := stmt.(*Set) if !ok { @@ -257,7 +265,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=ON") + stmt, err = parser.Parse("SET @@session.autocommit=ON") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -284,7 +292,8 @@ func TestSetAutocommitON(t *testing.T) { } func TestSetAutocommitOFF(t *testing.T) { - stmt, err := Parse("SET autocommit=OFF") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=OFF") require.NoError(t, err) s, ok := stmt.(*Set) if !ok { @@ -309,7 +318,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=OFF") + stmt, err = parser.Parse("SET @@session.autocommit=OFF") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -491,9 +500,10 @@ func TestReplaceExpr(t *testing.T) { out: "case a when b then c when d then c else :a end", }} to := NewArgument("a") + parser := NewTestParser() for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - tree, err := Parse(tcase.in) + tree, err := parser.Parse(tcase.in) require.NoError(t, err) var from *Subquery _ = Walk(func(node SQLNode) (kontinue bool, err error) { @@ -738,13 +748,14 @@ func TestSplitStatementToPieces(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - stmtPieces, err := SplitStatementToPieces(tcase.input) + stmtPieces, err := parser.SplitStatementToPieces(tcase.input) require.NoError(t, err) out := strings.Join(stmtPieces, ";") @@ -766,13 +777,15 @@ func TestDefaultStatus(t *testing.T) { } func TestShowTableStatus(t *testing.T) { + parser := NewTestParser() query := "Show Table Status FROM customer" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) require.NotNil(t, tree) } func BenchmarkStringTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -782,7 +795,7 @@ func BenchmarkStringTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) for _, q := range queries { - pp, err := Parse(q) + pp, err := parser.Parse(q) if err != nil { b.Fatal(err) } diff --git a/go/vt/sqlparser/cached_size.go 
b/go/vt/sqlparser/cached_size.go index d86b8a21155..a31b5767baa 100644 --- a/go/vt/sqlparser/cached_size.go +++ b/go/vt/sqlparser/cached_size.go @@ -3064,7 +3064,7 @@ func (cached *ParsedQuery) CachedSize(alloc bool) int64 { } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) - // field bindLocations []vitess.io/vitess/go/vt/sqlparser.bindLocation + // field bindLocations []vitess.io/vitess/go/vt/sqlparser.BindLocation { size += hack.RuntimeAllocSize(int64(cap(cached.bindLocations)) * int64(16)) } diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go index 734cd28e088..dd22fd7000c 100644 --- a/go/vt/sqlparser/comments_test.go +++ b/go/vt/sqlparser/comments_test.go @@ -322,6 +322,7 @@ func TestExtractCommentDirectives(t *testing.T) { }, }} + parser := NewTestParser() for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { sqls := []string{ @@ -339,7 +340,7 @@ func TestExtractCommentDirectives(t *testing.T) { for _, sql := range sqls { t.Run(sql, func(t *testing.T) { var comments *ParsedComments - stmt, _ := Parse(sql) + stmt, _ := parser.Parse(sql) switch s := stmt.(type) { case *Select: comments = s.Comments @@ -394,19 +395,20 @@ func TestExtractCommentDirectives(t *testing.T) { } func TestSkipQueryPlanCacheDirective(t *testing.T) { - stmt, _ := Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") + parser := NewTestParser() + stmt, _ := parser.Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("insert into user(id) values (1), (2)") + stmt, _ = parser.Parse("insert into user(id) values (1), (2)") assert.True(t, CachePlan(stmt)) - stmt, _ = Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") + stmt, _ = parser.Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") + stmt, _ = parser.Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") + stmt, _ = parser.Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") assert.False(t, CachePlan(stmt)) } @@ -427,9 +429,10 @@ func TestIgnoreMaxPayloadSizeDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxPayloadSizeDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -453,9 +456,10 @@ func TestIgnoreMaxMaxMemoryRowsDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxMaxMemoryRowsDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -479,9 +483,10 @@ func TestConsolidator(t *testing.T) { {"select /*vt+ CONSOLIDATOR=enabled_replicas */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) 
+ stmt, _ := parser.Parse(test.query) got := Consolidator(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("Consolidator(stmt) returned %v but expected %v", got, test.expected)) }) @@ -536,11 +541,12 @@ func TestGetPriorityFromStatement(t *testing.T) { }, } + parser := NewTestParser() for _, testCase := range testCases { theThestCase := testCase t.Run(theThestCase.query, func(t *testing.T) { t.Parallel() - stmt, err := Parse(theThestCase.query) + stmt, err := parser.Parse(theThestCase.query) assert.NoError(t, err) actualPriority, actualError := GetPriorityFromStatement(stmt) if theThestCase.expectedError != nil { diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go index 0209ee20352..d386339a57f 100644 --- a/go/vt/sqlparser/keywords_test.go +++ b/go/vt/sqlparser/keywords_test.go @@ -32,6 +32,7 @@ func TestCompatibility(t *testing.T) { require.NoError(t, err) defer file.Close() + parser := NewTestParser() scanner := bufio.NewScanner(file) skipStep := 4 for scanner.Scan() { @@ -46,7 +47,7 @@ func TestCompatibility(t *testing.T) { word = "`" + word + "`" } sql := fmt.Sprintf("create table %s(c1 int)", word) - _, err := ParseStrictDDL(sql) + _, err := parser.ParseStrictDDL(sql) if err != nil { t.Errorf("%s is not compatible with mysql", word) } diff --git a/go/vt/sqlparser/like_filter_test.go b/go/vt/sqlparser/like_filter_test.go index 242e45e2f8d..3249eb152b9 100644 --- a/go/vt/sqlparser/like_filter_test.go +++ b/go/vt/sqlparser/like_filter_test.go @@ -30,7 +30,8 @@ func TestEmptyLike(t *testing.T) { } func TestLikePrefixRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like 'key%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like 'key%'") if e != nil { t.Error(e) } @@ -42,7 +43,8 @@ func TestLikePrefixRegexp(t *testing.T) { } func TestLikeAnyCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '%val1%val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '%val1%val2%'") if e != nil { t.Error(e) } @@ -54,7 +56,8 @@ func TestLikeAnyCharsRegexp(t *testing.T) { } func TestSingleAndMultipleCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '_val1_val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '_val1_val2%'") if e != nil { t.Error(e) } @@ -66,7 +69,8 @@ func TestSingleAndMultipleCharsRegexp(t *testing.T) { } func TestSpecialCharactersRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '?.*?'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '?.*?'") if e != nil { t.Error(e) } @@ -78,7 +82,8 @@ func TestSpecialCharactersRegexp(t *testing.T) { } func TestQuoteLikeSpecialCharacters(t *testing.T) { - show, e := Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) + parser := NewTestParser() + show, e := parser.Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) if e != nil { t.Error(e) } diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index f0771d437fa..18f2ad44a7f 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -389,9 +389,10 @@ func TestNormalize(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3), }, }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err 
:= parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -416,9 +417,10 @@ func TestNormalizeInvalidDates(t *testing.T) { in: "select timestamp'foo'", err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect DATETIME value: '%s'", "foo"), }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -428,12 +430,13 @@ func TestNormalizeInvalidDates(t *testing.T) { } func TestNormalizeValidSQL(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.partialDDL || tcase.ignoreNormalizerTest { return } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -447,7 +450,7 @@ func TestNormalizeValidSQL(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) }) } @@ -463,7 +466,8 @@ func TestNormalizeOneCasae(t *testing.T) { if testOne.input == "" { t.Skip("empty test case") } - tree, err := Parse(testOne.input) + parser := NewTestParser() + tree, err := parser.Parse(testOne.input) require.NoError(t, err, testOne.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -477,12 +481,13 @@ func TestNormalizeOneCasae(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) } func TestGetBindVars(t *testing.T) { - stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") + parser := NewTestParser() + stmt, err := parser.Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") if err != nil { t.Fatal(err) } @@ -506,8 +511,9 @@ Prior to skip: BenchmarkNormalize-8 500000 3620 ns/op 1461 B/op 55 allocs/op */ func BenchmarkNormalize(b *testing.B) { + parser := NewTestParser() sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" - ast, reservedVars, err := Parse2(sql) + ast, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -517,6 +523,7 @@ func BenchmarkNormalize(b *testing.B) { } func BenchmarkNormalizeTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -527,7 +534,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) reservedVars := make([]BindVars, 0, len(queries)) for _, q := range queries { - pp, kb, err := Parse2(q) + pp, kb, err := parser.Parse2(q) if err != nil { b.Fatal(err) } @@ -549,6 +556,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { func BenchmarkNormalizeVTGate(b *testing.B) { const keyspace = "main_keyspace" + parser := NewTestParser() queries := loadQueries(b, "lobsters.sql.gz") if len(queries) > 10000 { @@ -560,7 +568,7 @@ func BenchmarkNormalizeVTGate(b *testing.B) { for i := 0; i < b.N; i++ { for _, sql := range queries { - stmt, reservedVars, err := Parse2(sql) + 
stmt, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -856,9 +864,10 @@ func benchmarkNormalization(b *testing.B, sqls []string) { b.Helper() b.ReportAllocs() b.ResetTimer() + parser := NewTestParser() for i := 0; i < b.N; i++ { for _, sql := range sqls { - stmt, reserved, err := Parse2(sql) + stmt, reserved, err := parser.Parse2(sql) if err != nil { b.Fatalf("%v: %q", err, sql) } diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 756bf4fb3d0..687bb7fbb51 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -34,7 +34,8 @@ func TestParseNextValid(t *testing.T) { sql.WriteRune(';') } - tokens := NewStringTokenizer(sql.String()) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(sql.String()) for _, tcase := range validSQL { want := tcase.output if want == "" { @@ -54,7 +55,8 @@ func TestParseNextValid(t *testing.T) { func TestIgnoreSpecialComments(t *testing.T) { input := `SELECT 1;/*! ALTER TABLE foo DISABLE KEYS */;SELECT 2;` - tokenizer := NewStringTokenizer(input) + parser := NewTestParser() + tokenizer := parser.NewStringTokenizer(input) tokenizer.SkipSpecialComments = true one, err := ParseNextStrictDDL(tokenizer) require.NoError(t, err) @@ -67,6 +69,7 @@ func TestIgnoreSpecialComments(t *testing.T) { // TestParseNextErrors tests all the error cases, and ensures a valid // SQL statement can be passed afterwards. func TestParseNextErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { if tcase.excludeMulti { // Skip tests which leave unclosed strings, or comments. @@ -74,7 +77,7 @@ func TestParseNextErrors(t *testing.T) { } t.Run(tcase.input, func(t *testing.T) { sql := tcase.input + "; select 1 from t" - tokens := NewStringTokenizer(sql) + tokens := parser.NewStringTokenizer(sql) // The first statement should be an error _, err := ParseNextStrictDDL(tokens) @@ -133,9 +136,9 @@ func TestParseNextEdgeCases(t *testing.T) { input: "create table a ignore me this is garbage; select 1 from a", want: []string{"create table a", "select 1 from a"}, }} - + parser := NewTestParser() for _, test := range tests { - tokens := NewStringTokenizer(test.input) + tokens := parser.NewStringTokenizer(test.input) for i, want := range test.want { tree, err := ParseNext(tokens) @@ -165,7 +168,8 @@ func TestParseNextStrictNonStrict(t *testing.T) { want := []string{"create table a", "select 1 from a"} // First go through as expected with non-strict DDL parsing. - tokens := NewStringTokenizer(input) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(input) for i, want := range want { tree, err := ParseNext(tokens) if err != nil { @@ -177,7 +181,7 @@ func TestParseNextStrictNonStrict(t *testing.T) { } // Now try again with strict parsing and observe the expected error. - tokens = NewStringTokenizer(input) + tokens = parser.NewStringTokenizer(input) _, err := ParseNextStrictDDL(tokens) if err == nil || !strings.Contains(err.Error(), "ignore") { t.Fatalf("ParseNext(%q) err = %q, want ignore", input, err) diff --git a/go/vt/sqlparser/parse_table.go b/go/vt/sqlparser/parse_table.go index 8766994ecfd..d522a855054 100644 --- a/go/vt/sqlparser/parse_table.go +++ b/go/vt/sqlparser/parse_table.go @@ -23,8 +23,8 @@ import ( // ParseTable parses the input as a qualified table name. // It handles all valid literal escaping. 
-func ParseTable(input string) (keyspace, table string, err error) { - tokenizer := NewStringTokenizer(input) +func (p *Parser) ParseTable(input string) (keyspace, table string, err error) { + tokenizer := p.NewStringTokenizer(input) // Start, want ID token, value := tokenizer.Scan() diff --git a/go/vt/sqlparser/parse_table_test.go b/go/vt/sqlparser/parse_table_test.go index 09e7ea44177..5f187cbc6d0 100644 --- a/go/vt/sqlparser/parse_table_test.go +++ b/go/vt/sqlparser/parse_table_test.go @@ -56,8 +56,9 @@ func TestParseTable(t *testing.T) { input: "k.t.", err: true, }} + parser := NewTestParser() for _, tcase := range testcases { - keyspace, table, err := ParseTable(tcase.input) + keyspace, table, err := parser.ParseTable(tcase.input) assert.Equal(t, tcase.keyspace, keyspace) assert.Equal(t, tcase.table, table) if tcase.err { diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index cc45db024d5..b80ded73b0b 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -3697,12 +3697,13 @@ var ( ) func TestValid(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) out := String(tree) assert.Equal(t, tcase.output, out) @@ -3734,6 +3735,7 @@ func TestParallelValid(t *testing.T) { wg := sync.WaitGroup{} wg.Add(parallelism) + parser := NewTestParser() for i := 0; i < parallelism; i++ { go func() { defer wg.Done() @@ -3742,7 +3744,7 @@ func TestParallelValid(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err) continue @@ -3941,9 +3943,10 @@ func TestInvalid(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) require.Error(t, err) require.Contains(t, err.Error(), tcase.err) }) @@ -4081,12 +4084,13 @@ func TestIntroducers(t *testing.T) { input: "select _utf8mb3 'x'", output: "select _utf8mb3 'x' from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) assert.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4175,11 +4179,12 @@ func TestCaseSensitivity(t *testing.T) { }, { input: "select /* use */ 1 from t1 use index (A) where b = 1", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4274,11 +4279,12 @@ func TestKeywords(t *testing.T) { output: "select current_user(), current_user() from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4351,11 +4357,12 @@ func TestConvert(t *testing.T) { input: "select cast(json_keys(c) as char(64) array) from t", }} + parser := NewTestParser() for 
_, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4399,7 +4406,7 @@ func TestConvert(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4437,12 +4444,13 @@ func TestSelectInto(t *testing.T) { output: "alter vschema create vindex my_vdx using `hash`", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4461,7 +4469,7 @@ func TestSelectInto(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4498,8 +4506,9 @@ func TestPositionedErr(t *testing.T) { output: PositionedErr{"syntax error", 34, ""}, }} + parser := NewTestParser() for _, tcase := range invalidSQL { - tkn := NewStringTokenizer(tcase.input) + tkn := parser.NewStringTokenizer(tcase.input) _, err := ParseNext(tkn) if posErr, ok := err.(PositionedErr); !ok { @@ -4548,11 +4557,12 @@ func TestSubStr(t *testing.T) { output: `select substr(substr('foo', 1), 2) from t`, }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4572,8 +4582,9 @@ func TestLoadData(t *testing.T) { "load data infile 'x.txt' into table 'c'", "load data from s3 'x.txt' into table x"} + parser := NewTestParser() for _, tcase := range validSQL { - _, err := Parse(tcase) + _, err := parser.Parse(tcase) require.NoError(t, err) } } @@ -5750,10 +5761,11 @@ partition by range (YEAR(purchased)) subpartition by hash (TO_DAYS(purchased)) output: "create table t (\n\tid int,\n\tinfo JSON,\n\tkey zips ((cast(info -> '$.field' as unsigned array)))\n)", }, } + parser := NewTestParser() for _, test := range createTableQueries { sql := strings.TrimSpace(test.input) t.Run(sql, func(t *testing.T) { - tree, err := ParseStrictDDL(sql) + tree, err := parser.ParseStrictDDL(sql) require.NoError(t, err) got := String(tree) expected := test.output @@ -5776,7 +5788,8 @@ func TestOne(t *testing.T) { return } sql := strings.TrimSpace(testOne.input) - tree, err := Parse(sql) + parser := NewTestParser() + tree, err := parser.Parse(sql) require.NoError(t, err) got := String(tree) expected := testOne.output @@ -5805,8 +5818,9 @@ func TestCreateTableLike(t *testing.T) { "create table ks.a like unsharded_ks.b", }, } + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5835,8 +5849,9 @@ func TestCreateTableEscaped(t *testing.T) { "\tprimary key (`delete`)\n" + ")", }} + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := 
parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5981,9 +5996,10 @@ var ( ) func TestErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := ParseStrictDDL(tcase.input) + _, err := parser.ParseStrictDDL(tcase.input) require.Error(t, err, tcase.output) require.Equal(t, tcase.output, err.Error()) }) @@ -6016,8 +6032,9 @@ func TestSkipToEnd(t *testing.T) { input: "create table a bb 'a;'; select * from t", output: "extra characters encountered after end of DDL: 'select'", }} + parser := NewTestParser() for _, tcase := range testcases { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -6049,8 +6066,9 @@ func loadQueries(t testing.TB, filename string) (queries []string) { } func TestParseDjangoQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "django_queries.txt") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6058,8 +6076,9 @@ func TestParseDjangoQueries(t *testing.T) { } func TestParseLobstersQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "lobsters.sql.gz") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6074,14 +6093,14 @@ func TestParseVersionedComments(t *testing.T) { }{ { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "50401", + mysqlVersion: "5.4.1", output: `create table table1 ( id int ) ENGINE InnoDB, CHARSET utf8mb4`, }, { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "80001", + mysqlVersion: "8.0.1", output: `create table table1 ( id int ) ENGINE InnoDB, @@ -6094,10 +6113,9 @@ partition by range (id) for _, testcase := range testcases { t.Run(testcase.input+":"+testcase.mysqlVersion, func(t *testing.T) { - oldMySQLVersion := mySQLParserVersion - defer func() { mySQLParserVersion = oldMySQLVersion }() - mySQLParserVersion = testcase.mysqlVersion - tree, err := Parse(testcase.input) + parser, err := New(Options{MySQLServerVersion: testcase.mysqlVersion}) + require.NoError(t, err) + tree, err := parser.Parse(testcase.input) require.NoError(t, err, testcase.input) out := String(tree) require.Equal(t, testcase.output, out) @@ -6106,6 +6124,7 @@ partition by range (id) } func BenchmarkParseTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -6117,7 +6136,7 @@ func BenchmarkParseTraces(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { b.Fatal(err) } @@ -6134,6 +6153,7 @@ func BenchmarkParseStress(b *testing.B) { sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = 
iiii and iiii = jjjj order by kkkk, llll limit 3, 4" ) + parser := NewTestParser() for i, sql := range []string{sql1, sql2} { b.Run(fmt.Sprintf("sql%d", i), func(b *testing.B) { var buf strings.Builder @@ -6143,7 +6163,7 @@ func BenchmarkParseStress(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := Parse(querySQL) + _, err := parser.Parse(querySQL) if err != nil { b.Fatal(err) } @@ -6182,8 +6202,9 @@ func BenchmarkParse3(b *testing.B) { b.ResetTimer() b.ReportAllocs() + parser := NewTestParser() for i := 0; i < b.N; i++ { - if _, err := Parse(benchQuery); err != nil { + if _, err := parser.Parse(benchQuery); err != nil { b.Fatal(err) } } @@ -6234,6 +6255,7 @@ func escapeNewLines(in string) string { } func testFile(t *testing.T, filename, tempDir string) { + parser := NewTestParser() t.Run(filename, func(t *testing.T) { fail := false expected := strings.Builder{} @@ -6243,7 +6265,7 @@ func testFile(t *testing.T, filename, tempDir string) { tcase.output = tcase.input } expected.WriteString(fmt.Sprintf("%sINPUT\n%s\nEND\n", tcase.comments, escapeNewLines(tcase.input))) - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if tcase.errStr != "" { errPresent := "" if err != nil { diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index b6b03a1901a..a612e555ee8 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -21,12 +21,7 @@ import ( "fmt" "strings" - "vitess.io/vitess/go/bytes2" - vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -34,11 +29,12 @@ import ( // bind locations are precomputed for fast substitutions. type ParsedQuery struct { Query string - bindLocations []bindLocation + bindLocations []BindLocation + truncateUILen int } -type bindLocation struct { - offset, length int +type BindLocation struct { + Offset, Length int } // NewParsedQuery returns a ParsedQuery of the ast. @@ -67,8 +63,8 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVaria func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error { current := 0 for _, loc := range pq.bindLocations { - buf.WriteString(pq.Query[current:loc.offset]) - name := pq.Query[loc.offset : loc.offset+loc.length] + buf.WriteString(pq.Query[current:loc.Offset]) + name := pq.Query[loc.Offset : loc.Offset+loc.Length] if encodable, ok := extras[name[1:]]; ok { encodable.EncodeSQL(buf) } else { @@ -78,86 +74,19 @@ func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*qu } EncodeValue(buf, supplied) } - current = loc.offset + loc.length + current = loc.Offset + loc.Length } buf.WriteString(pq.Query[current:]) return nil } -// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that -// the fields in the row are in the same order as the placeholders in this query. The fields might include generated -// columns which are dropped, by checking against skipFields, before binding the variables -// note: there can be more fields than bind locations since extra columns might be requested from the source if not all -// primary keys columns are present in the target table, for example. 
Also some values in the row may not correspond for -// values from the database on the source: sum/count for aggregation queries, for example -func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { - if len(fields) < len(pq.bindLocations) { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", - len(fields), len(pq.bindLocations)) - } - - type colInfo struct { - typ querypb.Type - length int64 - offset int64 - } - rowInfo := make([]*colInfo, 0) - - offset := int64(0) - for i, field := range fields { // collect info required for fields to be bound - length := row.Lengths[i] - if !skipFields[strings.ToLower(field.Name)] { - rowInfo = append(rowInfo, &colInfo{ - typ: field.Type, - length: length, - offset: offset, - }) - } - if length > 0 { - offset += row.Lengths[i] - } - } - - // bind field values to locations - var offsetQuery int - for i, loc := range pq.bindLocations { - col := rowInfo[i] - buf.WriteString(pq.Query[offsetQuery:loc.offset]) - typ := col.typ - - switch typ { - case querypb.Type_TUPLE: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) - case querypb.Type_JSON: - if col.length < 0 { // An SQL NULL and not an actual JSON value - buf.WriteString(sqltypes.NullStr) - } else { // A JSON value (which may be a JSON null literal value) - buf2 := row.Values[col.offset : col.offset+col.length] - vv, err := vjson.MarshalSQLValue(buf2) - if err != nil { - return err - } - buf.WriteString(vv.RawStr()) - } - default: - if col.length < 0 { - // -1 means a null variable; serialize it directly - buf.WriteString(sqltypes.NullStr) - } else { - vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) - vv.EncodeSQLBytes2(buf) - } - } - offsetQuery = loc.offset + loc.length - } - buf.WriteString(pq.Query[offsetQuery:]) - return nil +func (pq *ParsedQuery) BindLocations() []BindLocation { + return pq.bindLocations } // MarshalJSON is a custom JSON marshaler for ParsedQuery. -// Note that any queries longer that 512 bytes will be truncated. func (pq *ParsedQuery) MarshalJSON() ([]byte, error) { - return json.Marshal(TruncateForUI(pq.Query)) + return json.Marshal(pq.Query) } // EncodeValue encodes one bind variable value into the query. 
diff --git a/go/vt/sqlparser/parsed_query_test.go b/go/vt/sqlparser/parsed_query_test.go index 8c89a51984d..ef59676883f 100644 --- a/go/vt/sqlparser/parsed_query_test.go +++ b/go/vt/sqlparser/parsed_query_test.go @@ -27,7 +27,8 @@ import ( ) func TestNewParsedQuery(t *testing.T) { - stmt, err := Parse("select * from a where id =:id") + parser := NewTestParser() + stmt, err := parser.Parse("select * from a where id =:id") if err != nil { t.Error(err) return @@ -35,7 +36,7 @@ func TestNewParsedQuery(t *testing.T) { pq := NewParsedQuery(stmt) want := &ParsedQuery{ Query: "select * from a where id = :id", - bindLocations: []bindLocation{{offset: 27, length: 3}}, + bindLocations: []BindLocation{{Offset: 27, Length: 3}}, } if !reflect.DeepEqual(pq, want) { t.Errorf("GenerateParsedQuery: %+v, want %+v", pq, want) @@ -135,8 +136,9 @@ func TestGenerateQuery(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range tcases { - tree, err := Parse(tcase.query) + tree, err := parser.Parse(tcase.query) if err != nil { t.Errorf("parse failed for %s: %v", tcase.desc, err) continue diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go index 1d3a8b0fad1..4021d4d61be 100644 --- a/go/vt/sqlparser/parser.go +++ b/go/vt/sqlparser/parser.go @@ -23,16 +23,13 @@ import ( "strings" "sync" - "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -var versionFlagSync sync.Once - // parserPool is a pool for parser objects. var parserPool = sync.Pool{ New: func() any { @@ -43,9 +40,6 @@ var parserPool = sync.Pool{ // zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. var zeroParser yyParserImpl -// mySQLParserVersion is the version of MySQL that the parser would emulate -var mySQLParserVersion string - // yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a // particularly good reason to use yyParse directly, since it immediately discards its parser. // @@ -80,12 +74,12 @@ func yyParsePooled(yylex yyLexer) int { // bind variables that were found in the original SQL query. If a DDL statement // is partially parsed but still contains a syntax error, the // error is ignored and the DDL is returned anyway. 
-func Parse2(sql string) (Statement, BindVars, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) Parse2(sql string) (Statement, BindVars, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil { if typ, val := tokenizer.Scan(); typ != 0 { - return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) + return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", val) } log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) switch x := tokenizer.partialDDL.(type) { @@ -105,28 +99,6 @@ func Parse2(sql string) (Statement, BindVars, error) { return tokenizer.ParseTree, tokenizer.BindVars, nil } -func checkParserVersionFlag() { - if flag.Parsed() { - versionFlagSync.Do(func() { - convVersion, err := convertMySQLVersionToCommentVersion(servenv.MySQLServerVersion()) - if err != nil { - log.Fatalf("unable to parse mysql version: %v", err) - } - mySQLParserVersion = convVersion - }) - } -} - -// SetParserVersion sets the mysql parser version -func SetParserVersion(version string) { - mySQLParserVersion = version -} - -// GetParserVersion returns the version of the mysql parser -func GetParserVersion() string { - return mySQLParserVersion -} - // convertMySQLVersionToCommentVersion converts the MySQL version into comment version format. func convertMySQLVersionToCommentVersion(version string) (string, error) { var res = make([]int, 3) @@ -166,8 +138,8 @@ func convertMySQLVersionToCommentVersion(version string) (string, error) { } // ParseExpr parses an expression and transforms it to an AST -func ParseExpr(sql string) (Expr, error) { - stmt, err := Parse("select " + sql) +func (p *Parser) ParseExpr(sql string) (Expr, error) { + stmt, err := p.Parse("select " + sql) if err != nil { return nil, err } @@ -176,15 +148,15 @@ func ParseExpr(sql string) (Expr, error) { } // Parse behaves like Parse2 but does not return a set of bind variables -func Parse(sql string) (Statement, error) { - stmt, _, err := Parse2(sql) +func (p *Parser) Parse(sql string) (Statement, error) { + stmt, _, err := p.Parse2(sql) return stmt, err } // ParseStrictDDL is the same as Parse except it errors on // partially parsed DDL statements. -func ParseStrictDDL(sql string) (Statement, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) ParseStrictDDL(sql string) (Statement, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { return nil, tokenizer.LastError } @@ -198,7 +170,7 @@ func ParseStrictDDL(sql string) (Statement, error) { // returning a Statement which is the AST representation of the query. // The tokenizer will always read up to the end of the statement, allowing for // the next call to ParseNext to parse any subsequent SQL statements. When -// there are no more statements to parse, a error of io.EOF is returned. +// there are no more statements to parse, an error of io.EOF is returned. func ParseNext(tokenizer *Tokenizer) (Statement, error) { return parseNext(tokenizer, false) } @@ -237,10 +209,10 @@ func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { // ErrEmpty is a sentinel error returned when parsing empty statements. 
var ErrEmpty = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.EmptyQuery, "Query was empty") -// SplitStatement returns the first sql statement up to either a ; or EOF +// SplitStatement returns the first sql statement up to either a ';' or EOF and the remainder from the given buffer -func SplitStatement(blob string) (string, string, error) { - tokenizer := NewStringTokenizer(blob) +func (p *Parser) SplitStatement(blob string) (string, string, error) { + tokenizer := p.NewStringTokenizer(blob) tkn := 0 for { tkn, _ = tokenizer.Scan() @@ -259,7 +231,7 @@ func SplitStatement(blob string) (string, string, error) { // SplitStatementToPieces split raw sql statement that may have multi sql pieces to sql pieces // returns the sql pieces blob contains; or error if sql cannot be parsed -func SplitStatementToPieces(blob string) (pieces []string, err error) { +func (p *Parser) SplitStatementToPieces(blob string) (pieces []string, err error) { // fast path: the vast majority of SQL statements do not have semicolons in them if blob == "" { return nil, nil @@ -267,12 +239,15 @@ func SplitStatementToPieces(blob string) (pieces []string, err error) { switch strings.IndexByte(blob, ';') { case -1: // if there is no semicolon, return blob as a whole return []string{blob}, nil - case len(blob) - 1: // if there's a single semicolon and it's the last character, return blob without it + case len(blob) - 1: // if there's a single semicolon, and it's the last character, return blob without it return []string{blob[:len(blob)-1]}, nil } pieces = make([]string, 0, 16) - tokenizer := NewStringTokenizer(blob) + // It's safe here to not care about version-specific tokenization + // because we are only interested in semicolons and splitting + // statements. + tokenizer := p.NewStringTokenizer(blob) tkn := 0 var stmt string @@ -307,6 +282,49 @@ loop: return } -func IsMySQL80AndAbove() bool { - return mySQLParserVersion >= "80000" +func (p *Parser) IsMySQL80AndAbove() bool { + return p.version >= "80000" +} + +func (p *Parser) SetTruncateErrLen(l int) { + p.truncateErrLen = l +} + +type Options struct { + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int +} + +type Parser struct { + version string + truncateUILen int + truncateErrLen int +} + +func New(opts Options) (*Parser, error) { + if opts.MySQLServerVersion == "" { + opts.MySQLServerVersion = config.DefaultMySQLVersion + } + convVersion, err := convertMySQLVersionToCommentVersion(opts.MySQLServerVersion) + if err != nil { + return nil, err + } + return &Parser{ + version: convVersion, + truncateUILen: opts.TruncateUILen, + truncateErrLen: opts.TruncateErrLen, + }, nil +} + +func NewTestParser() *Parser { + convVersion, err := convertMySQLVersionToCommentVersion(config.DefaultMySQLVersion) + if err != nil { + panic(err) + } + return &Parser{ + version: convVersion, + truncateUILen: 512, + truncateErrLen: 0, + } } diff --git a/go/vt/sqlparser/parser_test.go b/go/vt/sqlparser/parser_test.go index 537cc598da7..5cb15317f29 100644 --- a/go/vt/sqlparser/parser_test.go +++ b/go/vt/sqlparser/parser_test.go @@ -51,9 +51,10 @@ func TestEmptyErrorAndComments(t *testing.T) { output: "select 1 from dual", }, } + parser := NewTestParser() for _, testcase := range testcases { t.Run(testcase.input, func(t *testing.T) { - res, err := Parse(testcase.input) + res, err := parser.Parse(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { @@ -63,7 +64,7 @@ func TestEmptyErrorAndComments(t *testing.T) { })
t.Run(testcase.input+"-Strict DDL", func(t *testing.T) { - res, err := ParseStrictDDL(testcase.input) + res, err := parser.ParseStrictDDL(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index a6cbffee351..774ada31dbd 100644 --- a/go/vt/sqlparser/precedence_test.go +++ b/go/vt/sqlparser/precedence_test.go @@ -53,8 +53,9 @@ func TestAndOrPrecedence(t *testing.T) { input: "select * from a where a=b or c=d and e=f", output: "(a = b or (c = d and e = f))", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -77,8 +78,9 @@ func TestPlusStarPrecedence(t *testing.T) { input: "select 1*2+3 from a", output: "((1 * 2) + 3)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -104,8 +106,9 @@ func TestIsPrecedence(t *testing.T) { input: "select * from a where (a=1 and b=2) is true", output: "((a = 1 and b = 2) is true)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -158,9 +161,10 @@ func TestParens(t *testing.T) { {in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"}, } + parser := NewTestParser() for _, tc := range tests { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse("select " + tc.in) + stmt, err := parser.Parse("select " + tc.in) require.NoError(t, err) out := String(stmt) require.Equal(t, "select "+tc.expected+" from dual", out) @@ -177,6 +181,7 @@ func TestRandom(t *testing.T) { g := NewGenerator(r, 5) endBy := time.Now().Add(1 * time.Second) + parser := NewTestParser() for { if time.Now().After(endBy) { break @@ -186,7 +191,7 @@ func TestRandom(t *testing.T) { inputQ := "select " + String(randomExpr) + " from t" // When it's parsed and unparsed - parsedInput, err := Parse(inputQ) + parsedInput, err := parser.Parse(inputQ) require.NoError(t, err, inputQ) // Then the unparsing should be the same as the input query diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go index a4bbb5f7b5c..ceb4b276017 100644 --- a/go/vt/sqlparser/predicate_rewriting_test.go +++ b/go/vt/sqlparser/predicate_rewriting_test.go @@ -86,9 +86,10 @@ func TestSimplifyExpression(in *testing.T) { expected: "A and (B or C)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) expr, changed := simplifyExpression(expr) @@ -157,9 +158,10 @@ func TestRewritePredicate(in *testing.T) { expected: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := RewritePredicate(expr) @@ -180,9 +182,10 @@ func TestExtractINFromOR(in *testing.T) { expected: "(a) in ((1), (2), (3), (4), (5), (6))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := ExtractINFromOR(expr.(*OrExpr)) diff --git 
a/go/vt/sqlparser/redact_query.go b/go/vt/sqlparser/redact_query.go index 194ad1ca64d..e6b8c009c68 100644 --- a/go/vt/sqlparser/redact_query.go +++ b/go/vt/sqlparser/redact_query.go @@ -19,11 +19,11 @@ package sqlparser import querypb "vitess.io/vitess/go/vt/proto/query" // RedactSQLQuery returns a sql string with the params stripped out for display -func RedactSQLQuery(sql string) (string, error) { +func (p *Parser) RedactSQLQuery(sql string) (string, error) { bv := map[string]*querypb.BindVariable{} sqlStripped, comments := SplitMarginComments(sql) - stmt, reservedVars, err := Parse2(sqlStripped) + stmt, reservedVars, err := p.Parse2(sqlStripped) if err != nil { return "", err } diff --git a/go/vt/sqlparser/redact_query_test.go b/go/vt/sqlparser/redact_query_test.go index 1cfd6d83af3..042f0f5b5f2 100644 --- a/go/vt/sqlparser/redact_query_test.go +++ b/go/vt/sqlparser/redact_query_test.go @@ -23,8 +23,9 @@ import ( ) func TestRedactSQLStatements(t *testing.T) { + parser := NewTestParser() sql := "select a,b,c from t where x = 1234 and y = 1234 and z = 'apple'" - redactedSQL, err := RedactSQLQuery(sql) + redactedSQL, err := parser.RedactSQLQuery(sql) if err != nil { t.Fatalf("redacting sql failed: %v", err) } diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go index 3044e04f8b0..91c925d672f 100644 --- a/go/vt/sqlparser/rewriter_test.go +++ b/go/vt/sqlparser/rewriter_test.go @@ -43,7 +43,8 @@ func BenchmarkVisitLargeExpression(b *testing.B) { func TestReplaceWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -67,7 +68,8 @@ func TestReplaceWorksInLaterCalls(t *testing.T) { func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -94,7 +96,8 @@ func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { } func TestChangeValueTypeGivesError(t *testing.T) { - parse, err := Parse("select * from a join b on a.id = b.id") + parser := NewTestParser() + parse, err := parser.Parse("select * from a join b on a.id = b.id") require.NoError(t, err) defer func() { diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 2b82e619445..58f575f8642 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -44,18 +44,18 @@ type Tokenizer struct { multi bool specialComment *Tokenizer - Pos int - buf string + Pos int + buf string + parser *Parser } // NewStringTokenizer creates a new Tokenizer for the // sql string. 
-func NewStringTokenizer(sql string) *Tokenizer { - checkParserVersionFlag() - +func (p *Parser) NewStringTokenizer(sql string) *Tokenizer { return &Tokenizer{ buf: sql, BindVars: make(map[string]struct{}), + parser: p, } } @@ -680,9 +680,9 @@ func (tkn *Tokenizer) scanMySQLSpecificComment() (int, string) { commentVersion, sql := ExtractMysqlComment(tkn.buf[start:tkn.Pos]) - if mySQLParserVersion >= commentVersion { + if tkn.parser.version >= commentVersion { // Only add the special comment to the tokenizer if the version of MySQL is higher or equal to the comment version - tkn.specialComment = NewStringTokenizer(sql) + tkn.specialComment = tkn.parser.NewStringTokenizer(sql) } return tkn.Scan() diff --git a/go/vt/sqlparser/token_test.go b/go/vt/sqlparser/token_test.go index 0fd43b8f86c..b6848d35f06 100644 --- a/go/vt/sqlparser/token_test.go +++ b/go/vt/sqlparser/token_test.go @@ -74,9 +74,10 @@ func TestLiteralID(t *testing.T) { out: "@x @y", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) require.Equal(t, tcase.out, string(out)) @@ -148,9 +149,10 @@ func TestString(t *testing.T) { want: "hello", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - id, got := NewStringTokenizer(tcase.in).Scan() + id, got := parser.NewStringTokenizer(tcase.in).Scan() require.Equal(t, tcase.id, id, "Scan(%q) = (%s), want (%s)", tcase.in, tokenName(id), tokenName(tcase.id)) require.Equal(t, tcase.want, string(got)) }) @@ -193,9 +195,10 @@ func TestSplitStatement(t *testing.T) { sql: "", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - sql, rem, err := SplitStatement(tcase.in) + sql, rem, err := parser.SplitStatement(tcase.in) if err != nil { t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) return @@ -218,27 +221,28 @@ func TestVersion(t *testing.T) { in string id []int }{{ - version: "50709", + version: "5.7.9", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80101", + version: "8.1.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80201", + version: "8.2.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }, { - version: "80102", + version: "8.1.2", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }} for _, tcase := range testcases { t.Run(tcase.version+"_"+tcase.in, func(t *testing.T) { - mySQLParserVersion = tcase.version - tok := NewStringTokenizer(tcase.in) + parser, err := New(Options{MySQLServerVersion: tcase.version}) + require.NoError(t, err) + tok := parser.NewStringTokenizer(tcase.in) for _, expectedID := range tcase.id { id, _ := tok.Scan() require.Equal(t, expectedID, id) @@ -306,9 +310,10 @@ func TestIntegerAndID(t *testing.T) { out: "3.2", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) expectedOut := tcase.out diff --git a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index aab0c1a1331..aec206f3b3d 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -34,7 +34,7 @@ type NodeFormatter func(buf 
*TrackedBuffer, node SQLNode) // want to generate a query that's different from the default. type TrackedBuffer struct { *strings.Builder - bindLocations []bindLocation + bindLocations []BindLocation nodeFormatter NodeFormatter literal func(string) (int, error) fast bool @@ -288,9 +288,9 @@ func areBothISExpr(op Expr, val Expr) bool { // WriteArg writes a value argument into the buffer along with // tracking information for future substitutions. func (buf *TrackedBuffer) WriteArg(prefix, arg string) { - buf.bindLocations = append(buf.bindLocations, bindLocation{ - offset: buf.Len(), - length: len(prefix) + len(arg), + buf.bindLocations = append(buf.bindLocations, BindLocation{ + Offset: buf.Len(), + Length: len(prefix) + len(arg), }) buf.WriteString(prefix) buf.WriteString(arg) diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go index 2375441b34e..4dff65634e8 100644 --- a/go/vt/sqlparser/tracked_buffer_test.go +++ b/go/vt/sqlparser/tracked_buffer_test.go @@ -278,16 +278,17 @@ func TestCanonicalOutput(t *testing.T) { }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - tree, err := Parse(tc.input) + tree, err := parser.Parse(tc.input) require.NoError(t, err, tc.input) out := CanonicalString(tree) require.Equal(t, tc.canonical, out, "bad serialization") // Make sure we've generated a valid query! - rereadStmt, err := Parse(out) + rereadStmt, err := parser.Parse(out) require.NoError(t, err, out) out = CanonicalString(rereadStmt) require.Equal(t, tc.canonical, out, "bad serialization") diff --git a/go/vt/sqlparser/truncate_query.go b/go/vt/sqlparser/truncate_query.go index 4bb63730fd2..3f4231fe8b5 100644 --- a/go/vt/sqlparser/truncate_query.go +++ b/go/vt/sqlparser/truncate_query.go @@ -16,55 +16,14 @@ limitations under the License. package sqlparser -import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" -) - -var ( - // truncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. - truncateUILen = 512 - - // truncateErrLen truncate queries in error logs to the given length. 0 means unlimited. - truncateErrLen = 0 -) - const TruncationText = "[TRUNCATED]" -func registerQueryTruncationFlags(fs *pflag.FlagSet) { - fs.IntVar(&truncateUILen, "sql-max-length-ui", truncateUILen, "truncate queries in debug UIs to the given length (default 512)") - fs.IntVar(&truncateErrLen, "sql-max-length-errors", truncateErrLen, "truncate queries in error logs to the given length (default unlimited)") -} - -func init() { - for _, cmd := range []string{ - "vtgate", - "vttablet", - "vtcombo", - "vtctld", - "vtctl", - "vtexplain", - "vtbackup", - "vttestserver", - "vtbench", - } { - servenv.OnParseFor(cmd, registerQueryTruncationFlags) - } -} - // GetTruncateErrLen is a function used to read the value of truncateErrLen -func GetTruncateErrLen() int { - return truncateErrLen -} - -// SetTruncateErrLen is a function used to override the value of truncateErrLen -// It is only meant to be used from tests and not from production code. 
-func SetTruncateErrLen(errLen int) { - truncateErrLen = errLen +func (p *Parser) GetTruncateErrLen() int { + return p.truncateErrLen } -func truncateQuery(query string, max int) string { +func TruncateQuery(query string, max int) string { sql, comments := SplitMarginComments(query) if max == 0 || len(sql) <= max { @@ -76,13 +35,13 @@ func truncateQuery(query string, max int) string { // TruncateForUI is used when displaying queries on various Vitess status pages // to keep the pages small enough to load and render properly -func TruncateForUI(query string) string { - return truncateQuery(query, truncateUILen) +func (p *Parser) TruncateForUI(query string) string { + return TruncateQuery(query, p.truncateUILen) } // TruncateForLog is used when displaying queries as part of error logs // to avoid overwhelming logging systems with potentially long queries and // bind value data. -func TruncateForLog(query string) string { - return truncateQuery(query, truncateErrLen) +func (p *Parser) TruncateForLog(query string) string { + return TruncateQuery(query, p.truncateErrLen) } diff --git a/go/vt/sqlparser/truncate_query_test.go b/go/vt/sqlparser/truncate_query_test.go index e5fc2fc0a9c..c7a2eed4493 100644 --- a/go/vt/sqlparser/truncate_query_test.go +++ b/go/vt/sqlparser/truncate_query_test.go @@ -26,7 +26,7 @@ func TestTruncateQuery(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("%s-%d", tt.query, tt.max), func(t *testing.T) { - assert.Equalf(t, tt.want, truncateQuery(tt.query, tt.max), "truncateQuery(%v, %v)", tt.query, tt.max) + assert.Equalf(t, tt.want, TruncateQuery(tt.query, tt.max), "TruncateQuery(%v, %v)", tt.query, tt.max) }) } } diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 2258eb2fd02..16c3e4ce976 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -25,18 +25,18 @@ import ( // QueryMatchesTemplates sees if the given query has the same fingerprint as one of the given templates // (one is enough) -func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { +func (p *Parser) QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { if len(queryTemplates) == 0 { return false, fmt.Errorf("No templates found") } bv := make(map[string]*querypb.BindVariable) normalize := func(q string) (string, error) { - q, err := NormalizeAlphabetically(q) + q, err := p.NormalizeAlphabetically(q) if err != nil { return "", err } - stmt, reservedVars, err := Parse2(q) + stmt, reservedVars, err := p.Parse2(q) if err != nil { return "", err } @@ -69,8 +69,8 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e // NormalizeAlphabetically rewrites given query such that: // - WHERE 'AND' expressions are reordered alphabetically -func NormalizeAlphabetically(query string) (normalized string, err error) { - stmt, err := Parse(query) +func (p *Parser) NormalizeAlphabetically(query string) (normalized string, err error) { + stmt, err := p.Parse(query) if err != nil { return normalized, err } @@ -118,12 +118,12 @@ func NormalizeAlphabetically(query string) (normalized string, err error) { // replaces any cases of the provided database name with the // specified replacement name. // Note: both database names provided should be unescaped strings. -func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { +func (p *Parser) ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { if newdb == olddb { // Nothing to do here. 
return query, nil } - in, err := Parse(query) + in, err := p.Parse(query) if err != nil { return "", err } diff --git a/go/vt/sqlparser/utils_test.go b/go/vt/sqlparser/utils_test.go index 63c9b10ba43..b2833a8187c 100644 --- a/go/vt/sqlparser/utils_test.go +++ b/go/vt/sqlparser/utils_test.go @@ -47,8 +47,9 @@ func TestNormalizeAlphabetically(t *testing.T) { out: "select * from tbl where b = 4 or a = 3", }} + parser := NewTestParser() for _, tc := range testcases { - normalized, err := NormalizeAlphabetically(tc.in) + normalized, err := parser.NormalizeAlphabetically(tc.in) assert.NoError(t, err) assert.Equal(t, tc.out, normalized) } @@ -173,9 +174,10 @@ func TestQueryMatchesTemplates(t *testing.T) { out: true, }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - match, err := QueryMatchesTemplates(tc.q, tc.tmpl) + match, err := parser.QueryMatchesTemplates(tc.q, tc.tmpl) assert.NoError(t, err) assert.Equal(t, tc.out, match) }) @@ -263,9 +265,10 @@ func TestReplaceTableQualifiers(t *testing.T) { out: "set names 'binary'", }, } + parser := NewTestParser() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReplaceTableQualifiers(tt.in, origDB, tt.newdb) + got, err := parser.ReplaceTableQualifiers(tt.in, origDB, tt.newdb) if tt.wantErr { require.Error(t, err) } else { diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index eda0b300559..b0e8a8d8bb1 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -26,6 +26,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" @@ -116,9 +118,9 @@ type replica struct { wg sync.WaitGroup } -func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server, collationEnv *collations.Environment) *replica { +func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *replica { t := &testing.T{} - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) fakeTablet := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, nil, testlib.TabletKeyspaceShard(t, "ks", "-80")) fakeTablet.StartActionLoop(t, wr) @@ -311,7 +313,15 @@ func main() { log.Infof("start rate set to: %v", rate) ts := memorytopo.NewServer(context.Background(), "cell1") collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) - replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts, collationEnv) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatal(err) + } + replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts, collationEnv, parser) primary := &primary{replica: replica} client := newClient(context.Background(), primary, replica, ts) client.run() diff --git a/go/vt/topo/helpers/compare_test.go b/go/vt/topo/helpers/compare_test.go index d31eedee2e9..82924e522f5 100644 --- a/go/vt/topo/helpers/compare_test.go +++ b/go/vt/topo/helpers/compare_test.go @@ -17,9 +17,10 @@ 
limitations under the License. package helpers import ( + "context" "testing" - "context" + "vitess.io/vitess/go/vt/sqlparser" ) func TestBasicCompare(t *testing.T) { @@ -32,7 +33,7 @@ func TestBasicCompare(t *testing.T) { t.Fatalf("Compare keyspaces is not failing when topos are not in sync") } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) err = CompareKeyspaces(ctx, fromTS, toTS) if err != nil { diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index 0df706eba31..6dff1c6ac22 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -25,6 +25,7 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +34,7 @@ import ( ) // CopyKeyspaces will create the keyspaces in the destination topo. -func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { +func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { return fmt.Errorf("GetKeyspaces: %w", err) @@ -57,7 +58,7 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { vs, err := fromTS.GetVSchema(ctx, keyspace) switch { case err == nil: - _, err = vindexes.BuildKeyspace(vs) + _, err = vindexes.BuildKeyspace(vs, parser) if err != nil { log.Errorf("BuildKeyspace(%v): %v", keyspace, err) break diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 2086a2e6552..142c6eb49ac 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -104,7 +106,7 @@ func TestBasic(t *testing.T) { fromTS, toTS := createSetup(ctx, t) // check keyspace copy - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) keyspaces, err := toTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("toTS.GetKeyspaces failed: %v", err) @@ -112,7 +114,7 @@ func TestBasic(t *testing.T) { if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" { t.Fatalf("unexpected keyspaces: %v", keyspaces) } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) // check shard copy CopyShards(ctx, fromTS, toTS) diff --git a/go/vt/topo/helpers/tee_test.go b/go/vt/topo/helpers/tee_test.go index 4dda901c300..1fbba807937 100644 --- a/go/vt/topo/helpers/tee_test.go +++ b/go/vt/topo/helpers/tee_test.go @@ -17,12 +17,13 @@ limitations under the License. 
package helpers import ( + "context" "reflect" "testing" "github.com/stretchr/testify/require" - "context" + "vitess.io/vitess/go/vt/sqlparser" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -32,7 +33,7 @@ func TestTee(t *testing.T) { // create the setup, copy the data fromTS, toTS := createSetup(ctx, t) - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) CopyShards(ctx, fromTS, toTS) CopyTablets(ctx, fromTS, toTS) diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 97c2bcdc733..28c7aaa9bbe 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -32,6 +32,8 @@ import ( "github.com/gorilla/mux" "github.com/patrickmn/go-cache" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sets" @@ -81,6 +83,7 @@ type API struct { vtexplainLock sync.Mutex collationEnv *collations.Environment + parser *sqlparser.Parser } // Options wraps the configuration options for different components of the @@ -96,7 +99,7 @@ type Options struct { // NewAPI returns a new API, configured to service the given set of clusters, // and configured with the given options. -func NewAPI(clusters []*cluster.Cluster, opts Options, collationEnv *collations.Environment) *API { +func NewAPI(clusters []*cluster.Cluster, opts Options, collationEnv *collations.Environment, parser *sqlparser.Parser) *API { clusterMap := make(map[string]*cluster.Cluster, len(clusters)) for _, cluster := range clusters { clusterMap[cluster.ID] = cluster @@ -143,6 +146,7 @@ func NewAPI(clusters []*cluster.Cluster, opts Options, collationEnv *collations. clusterMap: clusterMap, authz: authz, collationEnv: collationEnv, + parser: parser, } if opts.EnableDynamicClusters { @@ -302,6 +306,7 @@ func (api *API) WithCluster(c *cluster.Cluster, id string) dynamic.API { authz: api.authz, options: api.options, collationEnv: api.collationEnv, + parser: api.parser, } if c != nil { @@ -2154,7 +2159,7 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, er.Error() } - vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}, api.collationEnv) + vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}, api.collationEnv, api.parser) if err != nil { return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) } diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go index e94692d50c7..7323f601b25 100644 --- a/go/vt/vtadmin/api_authz_test.go +++ b/go/vt/vtadmin/api_authz_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -67,7 +68,7 @@ func TestCreateKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -132,7 +133,7 @@ func TestCreateShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api 
:= vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -199,7 +200,7 @@ func TestDeleteKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -264,7 +265,7 @@ func TestDeleteShards(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -339,7 +340,7 @@ func TestDeleteTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -406,7 +407,7 @@ func TestEmergencyFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -482,7 +483,7 @@ func TestFindSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -503,7 +504,7 @@ func TestFindSchema(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -523,7 +524,7 @@ func TestFindSchema(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -572,7 +573,7 @@ func TestGetBackups(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", 
err.Error()) @@ -645,7 +646,7 @@ func TestGetCellInfos(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -724,7 +725,7 @@ func TestGetCellsAliases(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -791,7 +792,7 @@ func TestGetClusters(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -865,7 +866,7 @@ func TestGetGates(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -884,7 +885,7 @@ func TestGetGates(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -904,7 +905,7 @@ func TestGetGates(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -945,7 +946,7 @@ func TestGetKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1012,7 +1013,7 @@ func TestGetKeyspaces(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1096,7 +1097,7 @@ func TestGetSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api 
:= vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1119,7 +1120,7 @@ func TestGetSchema(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1172,7 +1173,7 @@ func TestGetSchemas(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1191,7 +1192,7 @@ func TestGetSchemas(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1217,7 +1218,7 @@ func TestGetSchemas(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1270,7 +1271,7 @@ func TestGetShardReplicationPositions(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1351,7 +1352,7 @@ func TestGetSrvVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1418,7 +1419,7 @@ func TestGetSrvVSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1499,7 +1500,7 @@ func TestGetTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1570,7 +1571,7 @@ func 
TestGetTablets(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1651,7 +1652,7 @@ func TestGetVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1718,7 +1719,7 @@ func TestGetVSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1808,7 +1809,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1827,7 +1828,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1847,7 +1848,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1888,7 +1889,7 @@ func TestGetWorkflow(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1957,7 +1958,7 @@ func TestGetWorkflows(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2024,7 +2025,7 @@ func TestPingTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, 
collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2089,7 +2090,7 @@ func TestPlannedFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2156,7 +2157,7 @@ func TestRefreshState(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2221,7 +2222,7 @@ func TestRefreshTabletReplicationSource(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2292,7 +2293,7 @@ func TestReloadSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2372,7 +2373,7 @@ func TestRunHealthCheck(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2437,7 +2438,7 @@ func TestSetReadOnly(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2502,7 +2503,7 @@ func TestSetReadWrite(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2567,7 +2568,7 @@ func TestStartReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), 
opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2632,7 +2633,7 @@ func TestStopReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2697,7 +2698,7 @@ func TestTabletExternallyPromoted(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2765,7 +2766,7 @@ func TestVTExplain(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2787,7 +2788,7 @@ func TestVTExplain(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2830,7 +2831,7 @@ func TestValidateKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2891,7 +2892,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2952,7 +2953,7 @@ func TestValidateVersionKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index 549675e5145..c7020bd4e20 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -32,6 +32,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + 
"vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" _flag "vitess.io/vitess/go/internal/flag" @@ -557,7 +559,7 @@ func TestFindSchema(t *testing.T) { clusters[i] = vtadmintestutil.BuildCluster(t, cfg) } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.FindSchema(ctx, tt.req) @@ -767,7 +769,7 @@ func TestFindSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() schema, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ @@ -867,7 +869,7 @@ func TestGetClusters(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{}, collations.MySQL8()) + api := NewAPI(tt.clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) assert.NoError(t, err) @@ -945,7 +947,7 @@ func TestGetGates(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ctx := context.Background() resp, err := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) @@ -1067,7 +1069,7 @@ func TestGetKeyspace(t *testing.T) { testutil.AddShards(ctx, t, ts, shards...) topos[i] = ts vtctlds[i] = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) } @@ -1083,7 +1085,7 @@ func TestGetKeyspace(t *testing.T) { }) } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ks, err := api.GetKeyspace(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -1311,10 +1313,10 @@ func TestGetKeyspaces(t *testing.T) { servers := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), } @@ -1336,7 +1338,7 @@ func TestGetKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetKeyspaces(ctx, tt.req) require.NoError(t, err) @@ -1546,7 +1548,7 @@ func TestGetSchema(t *testing.T) { defer cancel() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, tt.ts, nil, vtadmintestutil.TopodataTabletsFromVTAdminTablets(tt.tablets)...) 
@@ -1560,7 +1562,7 @@ func TestGetSchema(t *testing.T) { VtctldClient: client, Tablets: tt.tablets, }) - api := NewAPI([]*cluster.Cluster{c}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{c}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchema(ctx, tt.req) @@ -1690,7 +1692,7 @@ func TestGetSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() schema, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ @@ -2200,10 +2202,10 @@ func TestGetSchemas(t *testing.T) { vtctlds := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), } @@ -2244,7 +2246,7 @@ func TestGetSchemas(t *testing.T) { }) } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchemas(ctx, tt.req) @@ -2465,7 +2467,7 @@ func TestGetSchemas(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchemas(context.Background(), &vtadminpb.GetSchemasRequest{ @@ -2639,7 +2641,7 @@ func TestGetSrvKeyspace(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2658,7 +2660,7 @@ func TestGetSrvKeyspace(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvKeyspace(ctx, tt.req) if tt.shouldErr { @@ -2803,7 +2805,7 @@ func TestGetSrvKeyspaces(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2824,7 +2826,7 @@ func TestGetSrvKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvKeyspaces(ctx, tt.req) if tt.shouldErr { @@ -2968,7 +2970,7 @@ func TestGetSrvVSchema(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2987,7 +2989,7 @@ func TestGetSrvVSchema(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -3262,7 +3264,7 @@ func TestGetSrvVSchemas(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -3281,7 +3283,7 @@ func TestGetSrvVSchemas(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvVSchemas(ctx, tt.req) if tt.shouldErr { @@ -3552,7 +3554,7 @@ func TestGetTablet(t *testing.T) { }) } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetTablet(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3747,7 +3749,7 @@ func TestGetTablets(t *testing.T) { }) } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetTablets(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3878,7 +3880,7 @@ func TestGetVSchema(t *testing.T) { t.Parallel() clusters := []*cluster.Cluster{vtadmintestutil.BuildCluster(t, tt.clusterCfg)} - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetVSchema(ctx, tt.req) if tt.shouldErr { @@ -4208,7 +4210,7 @@ func TestGetVSchemas(t *testing.T) { } clusters := vtadmintestutil.BuildClusters(t, tt.clusterCfgs...) 
- api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetVSchemas(ctx, tt.req) if tt.shouldErr { @@ -4292,7 +4294,7 @@ func TestGetVtctlds(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8()) + api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ctx := context.Background() resp, err := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) @@ -4424,7 +4426,7 @@ func TestGetWorkflow(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8()) + api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetWorkflow(ctx, tt.req) if tt.shouldErr { @@ -4863,7 +4865,7 @@ func TestGetWorkflows(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8()) + api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetWorkflows(ctx, tt.req) if tt.shouldErr { @@ -5114,7 +5116,7 @@ func TestVTExplain(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -5153,7 +5155,7 @@ func TestVTExplain(t *testing.T) { }), } - api := NewAPI(clusters, Options{}, collations.MySQL8()) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.VTExplain(ctx, tt.req) if tt.expectedError != nil { @@ -5355,7 +5357,7 @@ func TestServeHTTP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}, collations.MySQL8()) + api := NewAPI(tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}, collations.MySQL8(), sqlparser.NewTestParser()) // Copy the Cookie over to a new Request req := httptest.NewRequest(http.MethodGet, "/api/clusters", nil) diff --git a/go/vt/vtadmin/testutil/authztestgen/template.go b/go/vt/vtadmin/testutil/authztestgen/template.go index 5d50b3d3b3b..66d12e77b0b 100644 --- a/go/vt/vtadmin/testutil/authztestgen/template.go +++ b/go/vt/vtadmin/testutil/authztestgen/template.go @@ -46,6 +46,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -89,7 +90,7 @@ func Test{{ .Method }}(t *testing.T) { require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) {{ if not .SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -102,7 +103,7 @@ func Test{{ .Method }}(t *testing.T) { t.Run("{{ .Name }}", func(t *testing.T) { t.Parallel() {{ if 
$test.SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8()) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go index 9141d6b0c22..ca9dfe00dac 100644 --- a/go/vt/vtadmin/testutil/cluster.go +++ b/go/vt/vtadmin/testutil/cluster.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -169,7 +171,7 @@ func BuildIntegrationTestCluster(t testing.TB, ctx context.Context, c *vtadminpb ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := grpcvtctldtestutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) localclient := localvtctldclient.New(vtctld) diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 7f96cdc6f8f..4a2aa7ba411 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -84,6 +85,7 @@ func CreateTablet( mysqld mysqlctl.MysqlDaemon, dbcfgs *dbconfigs.DBConfigs, collationEnv *collations.Environment, + parser *sqlparser.Parser, ) error { alias := &topodatapb.TabletAlias{ Cell: cell, @@ -91,7 +93,7 @@ func CreateTablet( } log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) - controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias, collationEnv) + controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias, collationEnv, parser) initTabletType := tabletType if tabletType == topodatapb.TabletType_PRIMARY { initTabletType = topodatapb.TabletType_REPLICA @@ -107,6 +109,7 @@ func CreateTablet( DBConfigs: dbcfgs, QueryServiceControl: controller, CollationEnv: collationEnv, + SQLParser: parser, } tablet := &topodatapb.Tablet{ Alias: alias, @@ -173,6 +176,7 @@ func InitTabletMap( schemaDir string, ensureDatabase bool, collationEnv *collations.Environment, + parser *sqlparser.Parser, ) (uint32, error) { tabletMap = make(map[uint32]*comboTablet) @@ -188,11 +192,11 @@ func InitTabletMap( }) // iterate through the keyspaces - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv, parser) var uid uint32 = 1 for _, kpb := range tpb.Keyspaces { var err error - uid, err = CreateKs(ctx, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr, collationEnv) + uid, err = CreateKs(ctx, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr, collationEnv, parser) if err != nil { return 0, err } @@ -293,6 +297,7 @@ func CreateKs( uid uint32, wr *wrangler.Wrangler, collationEnv *collations.Environment, + parser *sqlparser.Parser, ) (uint32, error) { keyspace := kpb.Name @@ -342,7 +347,7 @@ func CreateKs( replicas-- // create the primary - if 
err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone(), collationEnv); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -350,7 +355,7 @@ func CreateKs( for i := 0; i < replicas; i++ { // create a replica tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone(), collationEnv); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -358,7 +363,7 @@ func CreateKs( for i := 0; i < rdonlys; i++ { // create a rdonly tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone(), collationEnv); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -376,7 +381,7 @@ func CreateKs( return 0, fmt.Errorf("cannot load vschema file %v for keyspace %v: %v", f, keyspace, err) } - _, err = vindexes.BuildKeyspace(formal) + _, err = vindexes.BuildKeyspace(formal, wr.SQLParser()) if err != nil { return 0, fmt.Errorf("BuildKeyspace(%v) failed: %v", keyspace, err) } diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index 24a7b1b6ab9..2475d92f150 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -6,6 +6,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/sqlparser" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -162,7 +163,7 @@ func TestGetSchema(t *testing.T) { logger := logutil.NewMemoryLogger() - err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8()), []string{ + err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()), []string{ "GetSchema", topoproto.TabletAliasString(tablet.Alias), }) @@ -202,7 +203,7 @@ func TestGetSchema(t *testing.T) { }, } - err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8()), []string{ + err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()), []string{ "GetSchema", "--table_sizes_only", topoproto.TabletAliasString(tablet.Alias), diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go index d1c471e7f2f..6d94ab22bd6 100644 --- a/go/vt/vtctl/endtoend/onlineddl_show_test.go +++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" @@ -121,7 +123,7 @@ func onlineDDLTest(t *testing.T, args []string, expectedQuery string) { tmclienttest.SetProtocol("go.vt.vtctl.endtoend", t.Name()) logger := logutil.NewMemoryLogger() - wr := wrangler.New(logger, fakeTopo, &tmc, collations.MySQL8()) + wr := wrangler.New(logger, fakeTopo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()) err := vtctl.RunCommand(ctx, wr, args) assert.Error(t, err) diff --git 
a/go/vt/vtctl/grpcvtctlclient/client_test.go b/go/vt/vtctl/grpcvtctlclient/client_test.go index c28fade5478..5349e2d1a3b 100644 --- a/go/vt/vtctl/grpcvtctlclient/client_test.go +++ b/go/vt/vtctl/grpcvtctlclient/client_test.go @@ -28,6 +28,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/grpcclient" @@ -54,7 +56,7 @@ func TestVtctlServer(t *testing.T) { // Create a gRPC server and listen on the port server := grpc.NewServer() - vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8())) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8(), sqlparser.NewTestParser())) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server @@ -88,7 +90,7 @@ func TestVtctlAuthClient(t *testing.T) { opts = append(opts, grpc.UnaryInterceptor(servenv.FakeAuthUnaryInterceptor)) server := grpc.NewServer(opts...) - vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8())) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8(), sqlparser.NewTestParser())) go server.Serve(listener) authJSON := `{ diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index 93c95ffa607..7166bafbcff 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -41,7 +42,7 @@ func TestFindAllShardsInKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { @@ -88,7 +89,7 @@ func TestGetKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { @@ -117,7 +118,7 @@ func TestGetKeyspaces(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index e017a7fd013..c3915792e4a 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -24,6 +24,7 @@ import ( 
"vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -47,7 +48,7 @@ func TestInitShardPrimary(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() defer tmc.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -94,7 +95,7 @@ func TestInitShardPrimary(t *testing.T) { tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) resp, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, @@ -110,7 +111,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -149,7 +150,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) _, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index b0b7bd370f7..db2e9d4471d 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -92,13 +92,13 @@ type VtctldServer struct { } // NewVtctldServer returns a new VtctldServer for the given topo server. 
-func NewVtctldServer(ts *topo.Server) *VtctldServer { +func NewVtctldServer(ts *topo.Server, parser *sqlparser.Parser) *VtctldServer { tmc := tmclient.NewTabletManagerClient() return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(ts, tmc, parser), } } @@ -108,7 +108,7 @@ func NewTestVtctldServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Vtc return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(ts, tmc, sqlparser.NewTestParser()), } } @@ -268,7 +268,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc logstream = append(logstream, e) }) - executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize) + executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize, s.ws.SQLParser()) if err = executor.SetDDLStrategy(req.DdlStrategy); err != nil { err = vterrors.Wrapf(err, "invalid DdlStrategy: %s", req.DdlStrategy) @@ -337,7 +337,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV span.Annotate("sql_mode", true) var stmt sqlparser.Statement - stmt, err = sqlparser.Parse(req.Sql) + stmt, err = s.ws.SQLParser().Parse(req.Sql) if err != nil { err = vterrors.Wrapf(err, "Parse(%s)", req.Sql) return nil, err @@ -368,7 +368,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil } - _, err = vindexes.BuildKeyspace(vs) + _, err = vindexes.BuildKeyspace(vs, s.ws.SQLParser()) if err != nil { err = vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace) return nil, err @@ -4959,8 +4959,8 @@ func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.Work } // StartServer registers a VtctldServer for RPCs on the given gRPC server. -func StartServer(s *grpc.Server, ts *topo.Server) { - vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts)) +func StartServer(s *grpc.Server, ts *topo.Server, parser *sqlparser.Parser) { + vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts, parser)) } // getTopologyCell is a helper method that returns a topology cell given its path. diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 3100855e370..9625d0c281b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql" @@ -310,7 +312,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -608,7 +610,7 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, tt.tablets...) 
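The ApplyVSchema hunk above is the representative behavioral change: statement parsing and keyspace construction now go through the parser owned by the injected workflow server instead of the package-level sqlparser.Parse. Condensed restatement of that hunk (error handling and surrounding code elided):

// Before this patch: parse via the process-global parser.
//	stmt, err = sqlparser.Parse(req.Sql)

// After this patch: parse via the injected parser.
stmt, err = s.ws.SQLParser().Parse(req.Sql)
if err != nil {
	return nil, vterrors.Wrapf(err, "Parse(%s)", req.Sql)
}
// ...
// Keyspace construction receives the same injected parser.
_, err = vindexes.BuildKeyspace(vs, s.ws.SQLParser())
if err != nil {
	return nil, vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace)
}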
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -738,7 +740,7 @@ func TestSleepTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) start := time.Now() diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 124c7096bc4..fa223224ea0 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -28,6 +28,7 @@ import ( "time" _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -85,7 +86,7 @@ func TestPanicHandler(t *testing.T) { }() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, nil, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellInfo(context.Background(), nil) @@ -141,7 +142,7 @@ func TestAddCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellInfo(ctx, tt.req) if tt.shouldErr { @@ -214,7 +215,7 @@ func TestAddCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -326,7 +327,7 @@ func TestApplyRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ApplyRoutingRules(ctx, tt.req) if tt.shouldErr { @@ -421,7 +422,7 @@ func TestApplyVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ @@ -701,7 +702,7 @@ func TestBackup(t *testing.T) { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := localvtctldclient.New(vtctld) stream, err := client.Backup(ctx, tt.req) @@ -1041,7 +1042,7 @@ func TestBackupShard(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := 
localvtctldclient.New(vtctld) stream, err := client.BackupShard(ctx, tt.req) @@ -1261,7 +1262,7 @@ func TestCancelSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CancelSchemaMigration(ctx, test.req) @@ -1493,7 +1494,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: ts, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, sqlparser.NewTestParser()) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -1539,7 +1542,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: nil, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, sqlparser.NewTestParser()) + }) testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -1760,7 +1765,7 @@ func TestCleanupSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CleanupSchemaMigration(ctx, test.req) @@ -1962,7 +1967,7 @@ func TestForceCutOverSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ForceCutOverSchemaMigration(ctx, test.req) @@ -2166,7 +2171,7 @@ func TestCompleteSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CompleteSchemaMigration(ctx, test.req) @@ -2422,7 +2427,7 @@ func TestCreateKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) for name, ks := range tt.topo { @@ -2700,7 +2705,7 @@ func TestCreateShard(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) for _, ks := range tt.keyspaces { @@ -2755,7 +2760,7 @@ func TestDeleteCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteCellInfo(ctx, tt.req) if tt.shouldErr { @@ -2816,7 +2821,7 @@ func TestDeleteCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -3048,7 +3053,7 @@ func TestDeleteKeyspace(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) @@ -3557,12 +3562,12 @@ func TestDeleteShards(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -3705,7 +3710,7 @@ func TestDeleteSrvKeyspace(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -4166,7 +4171,7 @@ func TestDeleteTablets(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup tablets and shards @@ -4395,7 +4400,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -4537,7 +4542,7 @@ func TestExecuteFetchAsApp(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ExecuteFetchAsApp(ctx, tt.req) if tt.shouldErr { @@ -4664,7 +4669,7 @@ func TestExecuteFetchAsDBA(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ExecuteFetchAsDBA(ctx, tt.req) if tt.shouldErr { @@ -4849,7 +4854,7 @@ func TestExecuteHook(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ExecuteHook(ctx, tt.req) @@ -4870,7 +4875,7 @@ func TestFindAllShardsInKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) ks := &vtctldatapb.Keyspace{ @@ -4912,7 +4917,7 @@ func TestGetBackups(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.BackupStorage.Backups = map[string][]string{ @@ -5020,7 +5025,7 @@ func TestGetKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) expected := &vtctldatapb.GetKeyspaceResponse{ @@ -5046,7 +5051,7 @@ func TestGetCellInfoNames(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -5055,7 +5060,7 @@ func TestGetCellInfoNames(t *testing.T) { ts = memorytopo.NewServer(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err = vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -5064,7 +5069,7 @@ func TestGetCellInfoNames(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld = 
testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) topofactory.SetError(assert.AnError) @@ -5079,7 +5084,7 @@ func TestGetCellInfo(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) expected := &topodatapb.CellInfo{ @@ -5107,7 +5112,7 @@ func TestGetCellsAliases(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "c11", "c12", "c13", "c21", "c22") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) alias1 := &topodatapb.CellsAlias{ @@ -5134,7 +5139,7 @@ func TestGetCellsAliases(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) topofactory.SetError(assert.AnError) @@ -5214,7 +5219,9 @@ func TestGetFullStatus(t *testing.T) { FullStatusResult: &replicationdatapb.FullStatus{ ServerUuid: tt.serverUUID, }, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, sqlparser.NewTestParser()) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -5239,7 +5246,7 @@ func TestGetKeyspaces(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) @@ -5407,7 +5414,7 @@ func TestGetPermissions(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetPermissions(ctx, tt.req) if tt.shouldErr { @@ -5483,7 +5490,7 @@ func TestGetRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetRoutingRules(ctx, &vtctldatapb.GetRoutingRulesRequest{}) if tt.shouldErr { @@ -5508,7 +5515,7 @@ func TestGetSchema(t *testing.T) { }{}, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) validAlias := &topodatapb.TabletAlias{ @@ -5873,7 +5880,7 @@ func TestGetSchemaMigrations(t *testing.T) { ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{AlsoSetShardPrimary: true}, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if test.failTopo { @@ -5964,7 +5971,7 @@ func TestGetShard(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.topo...) @@ -6101,7 +6108,7 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetSrvKeyspaceNames(ctx, tt.req) if tt.shouldErr { @@ -6258,7 +6265,7 @@ func TestGetSrvKeyspaces(t *testing.T) { testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if tt.topoErr != nil { @@ -6285,7 +6292,7 @@ func TestGetSrvVSchema(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6496,7 +6503,7 @@ func TestGetSrvVSchemas(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2", "zone3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6556,7 +6563,7 @@ func TestGetTablet(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablet := &topodatapb.Tablet{ @@ -7179,7 +7186,7 @@ func TestGetTablets(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
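A minimal sketch of the construction pattern these test hunks repeat, collected in one place: the server under test now receives an explicit *sqlparser.Parser built with sqlparser.NewTestParser() instead of relying on a package-level parser. This helper is hypothetical (not part of the patch); import paths are inferred from files touched elsewhere in this change, and the return type of the testutil helper is assumed.

package grpcvtctldserver

import (
	"context"
	"testing"

	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/topo"
	"vitess.io/vitess/go/vt/topo/memorytopo"
	"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil"

	vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
)

// newTestVtctld is a hypothetical helper capturing the pattern used by the
// surrounding hunks: the VtctldServer under test is built with an explicitly
// injected parser rather than a global one.
func newTestVtctld(ctx context.Context, t *testing.T) vtctlservicepb.VtctldServer {
	ts := memorytopo.NewServer(ctx, "zone1")
	return testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil,
		func(ts *topo.Server) vtctlservicepb.VtctldServer {
			// NewTestParser builds a parser with test defaults; production
			// callers pass down the parser they already hold.
			return NewVtctldServer(ts, sqlparser.NewTestParser())
		})
}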
@@ -7203,7 +7210,7 @@ func TestGetTopologyPath(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) err := ts.CreateKeyspace(ctx, "keyspace1", &topodatapb.Keyspace{}) @@ -7292,7 +7299,7 @@ func TestGetVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) t.Run("found", func(t *testing.T) { @@ -7523,7 +7530,7 @@ func TestLaunchSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.LaunchSchemaMigration(ctx, test.req) @@ -7610,7 +7617,7 @@ func TestPingTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PingTablet(ctx, tt.req) @@ -7858,7 +7865,7 @@ func TestPlannedReparentShard(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -7901,7 +7908,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildKeyspaceGraph(ctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7918,7 +7925,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildKeyspaceGraph(context.Background(), &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7938,7 +7945,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) factory.SetError(assert.AnError) @@ -7959,7 +7966,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) lctx, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") @@ -8008,7 +8015,7 @@ func TestRebuildVSchemaGraph(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts 
*topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildVSchemaGraph(ctx, req) if tt.shouldErr { @@ -8107,7 +8114,7 @@ func TestRefreshState(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RefreshState(ctx, tt.req) if tt.shouldErr { @@ -8292,7 +8299,7 @@ func TestRefreshStateByShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.RefreshStateByShard(ctx, tt.req) if tt.shouldErr { @@ -8396,7 +8403,7 @@ func TestReloadSchema(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ReloadSchema(ctx, tt.req) if tt.shouldErr { @@ -8494,7 +8501,7 @@ func TestReloadSchemaKeyspace(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ReloadSchemaKeyspace(ctx, tt.req) if tt.shouldErr { @@ -8652,7 +8659,7 @@ func TestReloadSchemaShard(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ReloadSchemaShard(ctx, tt.req) if tt.shouldErr { @@ -8671,7 +8678,7 @@ func TestRemoveBackup(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) setup := func() { @@ -8862,7 +8869,7 @@ func TestRemoveKeyspaceCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup topo @@ -9151,7 +9158,7 @@ func TestRemoveShardCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup shard topos and replication graphs. @@ -9761,7 +9768,7 @@ func TestReparentTablet(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -9894,7 +9901,7 @@ func TestRestoreFromBackup(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := localvtctldclient.New(vtctld) stream, err := client.RestoreFromBackup(ctx, tt.req) @@ -10112,7 +10119,7 @@ func TestRetrySchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.RetrySchemaMigration(ctx, test.req) @@ -10219,7 +10226,7 @@ func TestRunHealthCheck(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RunHealthCheck(ctx, tt.req) if tt.shouldErr { @@ -10299,7 +10306,7 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetKeyspaceDurabilityPolicy(ctx, tt.req) if tt.expectedErr != "" { @@ -10358,7 +10365,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { name: "lock error", setup: func(t *testing.T, tt *testcase) context.Context { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10396,7 +10403,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetShardIsPrimaryServing(tt.ctx, tt.req) if tt.shouldErr { @@ -10610,7 +10617,7 @@ func TestSetShardTabletControl(t *testing.T) { name: "keyspace lock error", setup: func(t *testing.T, tt *testcase) { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10646,7 +10653,7 @@ func TestSetShardTabletControl(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetShardTabletControl(tt.ctx, tt.req) if tt.shouldErr { @@ -10850,7 +10857,7 @@ func TestSetWritable(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.SetWritable(ctx, tt.req) @@ -10871,7 +10878,7 @@ func TestShardReplicationAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablets := []*topodatapb.Tablet{ @@ -11166,7 +11173,7 @@ func TestShardReplicationPositions(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) requestCtx := ctx @@ -11197,7 +11204,7 @@ func TestShardReplicationRemove(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablets := []*topodatapb.Tablet{ @@ -11357,7 +11364,7 @@ func TestSourceShardAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -11492,7 +11499,7 @@ func TestSourceShardDelete(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -11684,7 +11691,7 @@ func TestStartReplication(t *testing.T) { AlsoSetShardPrimary: true, }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.StartReplication(ctx, tt.req) @@ -11821,7 +11828,7 @@ func TestStopReplication(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.StopReplication(ctx, tt.req) @@ -12208,7 +12215,7 @@ func TestTabletExternallyReparented(t *testing.T) { TopoServer: ts, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if tt.tmcHasNoTopo { @@ -12393,7 +12400,7 @@ func TestUpdateCellInfo(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.UpdateCellInfo(ctx, tt.req) if tt.shouldErr { @@ -12543,7 +12550,7 @@ func TestUpdateCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.UpdateCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -12651,7 +12658,7 @@ func TestValidate(t *testing.T) { SkipShardCreation: false, }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.Validate(ctx, &vtctldatapb.ValidateRequest{ @@ -12768,7 +12775,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) schema1 := &tabletmanagerdatapb.SchemaDefinition{ @@ -12954,7 +12961,7 @@ func TestValidateVersionKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tests := []*struct { @@ -13069,7 +13076,7 @@ func TestValidateVersionShard(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tests := []*struct { @@ -13661,7 +13668,7 @@ func TestValidateShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ValidateShard(ctx, tt.req) if tt.shouldErr { diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 20c51968a11..736bda4a1f4 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -1374,7 +1374,7 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t if resultsForTablet, ok := fake.VReplicationExecResults[key]; ok { // Round trip the expected query both to ensure it's valid and to // standardize on capitalization and formatting. 
-		stmt, err := sqlparser.Parse(query)
+		stmt, err := sqlparser.NewTestParser().Parse(query)
 		if err != nil {
 			return nil, err
 		}
diff --git a/go/vt/vtctl/grpcvtctlserver/server.go b/go/vt/vtctl/grpcvtctlserver/server.go
index c7e8c2b1877..29fc4be0651 100644
--- a/go/vt/vtctl/grpcvtctlserver/server.go
+++ b/go/vt/vtctl/grpcvtctlserver/server.go
@@ -25,6 +25,8 @@ import (
 	"google.golang.org/grpc"
 
+	"vitess.io/vitess/go/vt/sqlparser"
+
 	"vitess.io/vitess/go/mysql/collations"
 	"vitess.io/vitess/go/vt/logutil"
@@ -44,11 +46,12 @@ type VtctlServer struct {
 	vtctlservicepb.UnimplementedVtctlServer
 	ts           *topo.Server
 	collationEnv *collations.Environment
+	parser       *sqlparser.Parser
 }
 
 // NewVtctlServer returns a new Vtctl Server for the topo server.
-func NewVtctlServer(ts *topo.Server, collationEnv *collations.Environment) *VtctlServer {
-	return &VtctlServer{ts: ts, collationEnv: collationEnv}
+func NewVtctlServer(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *VtctlServer {
+	return &VtctlServer{ts: ts, collationEnv: collationEnv, parser: parser}
 }
 
 // ExecuteVtctlCommand is part of the vtctldatapb.VtctlServer interface
@@ -75,13 +78,13 @@ func (s *VtctlServer) ExecuteVtctlCommand(args *vtctldatapb.ExecuteVtctlCommandR
 	// create the wrangler
 	tmc := tmclient.NewTabletManagerClient()
 	defer tmc.Close()
-	wr := wrangler.New(logger, s.ts, tmc, s.collationEnv)
+	wr := wrangler.New(logger, s.ts, tmc, s.collationEnv, s.parser)
 
 	// execute the command
 	return vtctl.RunCommand(stream.Context(), wr, args.Args)
 }
 
 // StartServer registers the VtctlServer for RPCs
-func StartServer(s *grpc.Server, ts *topo.Server, collationEnv *collations.Environment) {
-	vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(ts, collationEnv))
+func StartServer(s *grpc.Server, ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) {
+	vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(ts, collationEnv, parser))
 }
diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go
index 96d8bf9e9be..9de6add6f28 100644
--- a/go/vt/vtctl/vtctl.go
+++ b/go/vt/vtctl/vtctl.go
@@ -2921,7 +2921,7 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf
 		*migrationContext = *requestContext
 	}
 
-	parts, err := sqlparser.SplitStatementToPieces(change)
+	parts, err := wr.SQLParser().SplitStatementToPieces(change)
 	if err != nil {
 		return err
 	}
@@ -3341,7 +3341,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p
 		*sql = string(sqlBytes)
 	}
 
-	stmt, err := sqlparser.Parse(*sql)
+	stmt, err := wr.SQLParser().Parse(*sql)
 	if err != nil {
 		return fmt.Errorf("error parsing vschema statement `%s`: %v", *sql, err)
 	}
@@ -3392,7 +3392,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p
 	}
 
 	// Validate the VSchema.
- ksVs, err := vindexes.BuildKeyspace(vs) + ksVs, err := vindexes.BuildKeyspace(vs, wr.SQLParser()) if err != nil { return err } @@ -3424,7 +3424,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p return err } - if _, err := vindexes.BuildKeyspace(vs); err != nil { + if _, err := vindexes.BuildKeyspace(vs, wr.SQLParser()); err != nil { return err } diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go index b4c548575d5..eb9e7c25f32 100644 --- a/go/vt/vtctl/workflow/materializer.go +++ b/go/vt/vtctl/workflow/materializer.go @@ -63,6 +63,8 @@ type materializer struct { isPartial bool primaryVindexesDiffer bool workflowType binlogdatapb.VReplicationWorkflowType + + parser *sqlparser.Parser } func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, error) { @@ -197,7 +199,7 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.parser.Parse(ts.SourceExpression) if err != nil { return "", err } @@ -296,7 +298,7 @@ func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard * } // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.parser.Parse(ts.SourceExpression) if err != nil { return nil, err } @@ -406,7 +408,7 @@ func (mz *materializer) deploySchema() error { if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. - sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.parser.TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -422,7 +424,7 @@ func (mz *materializer) deploySchema() error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.parser) if err != nil { return err } @@ -431,7 +433,7 @@ func (mz *materializer) deploySchema() error { } if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.parser) if err != nil { return err } @@ -452,7 +454,7 @@ func (mz *materializer) deploySchema() error { // We use schemadiff to normalize the schema. // For now, and because this is could have wider implications, we ignore any errors in // reading the source schema. 
- schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + schema, err := schemadiff.NewSchemaFromQueries(applyDDLs, mz.parser) if err != nil { log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) } else { @@ -484,7 +486,7 @@ func (mz *materializer) buildMaterializer() error { if err != nil { return err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, mz.parser) if err != nil { return err } diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go index 1026628405e..14ea59f690e 100644 --- a/go/vt/vtctl/workflow/materializer_env_test.go +++ b/go/vt/vtctl/workflow/materializer_env_test.go @@ -82,7 +82,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.ws = NewServer(env.topoServ, env.tmc) + parser := sqlparser.NewTestParser() + env.ws = NewServer(env.topoServ, env.tmc, parser) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -98,7 +99,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := parser.TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go index fc39bb4d30b..f9c1536ddbf 100644 --- a/go/vt/vtctl/workflow/materializer_test.go +++ b/go/vt/vtctl/workflow/materializer_test.go @@ -28,6 +28,8 @@ import ( "golang.org/x/exp/maps" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -134,7 +136,7 @@ func TestStripForeignKeys(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -208,7 +210,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := stripTableConstraints(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -3013,7 +3015,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { cell: "cell", tmc: newTestMaterializerTMClient(), } - env.ws = NewServer(env.topoServ, env.tmc) + env.ws = NewServer(env.topoServ, env.tmc, sqlparser.NewTestParser()) defer env.close() tabletID := 100 diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index 5302b33edb8..3d82ccfd1c9 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -142,18 +142,24 @@ type Server struct { ts *topo.Server tmc tmclient.TabletManagerClient // Limit the number of concurrent background goroutines if needed. - sem *semaphore.Weighted + sem *semaphore.Weighted + parser *sqlparser.Parser } // NewServer returns a new server instance with the given topo.Server and // TabletManagerClient. 
-func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Server { +func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient, parser *sqlparser.Parser) *Server { return &Server{ - ts: ts, - tmc: tmc, + ts: ts, + tmc: tmc, + parser: parser, } } +func (s *Server) SQLParser() *sqlparser.Parser { + return s.parser +} + // CheckReshardingJournalExistsOnTablet returns the journal (or an empty // journal) and a boolean to indicate if the resharding_journal table exists on // the given tablet. @@ -407,7 +413,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows where, ) - vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc, s.SQLParser()) results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err @@ -1243,6 +1249,7 @@ func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSet sourceTs: s.ts, tmc: s.tmc, ms: ms, + parser: s.SQLParser(), } err := mz.createMaterializerStreams() @@ -1382,6 +1389,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl tmc: s.tmc, ms: ms, workflowType: workflowType, + parser: s.SQLParser(), } err = mz.createMoveTablesStreams(req) if err != nil { @@ -1836,7 +1844,7 @@ func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDe deleteReq := &tabletmanagerdatapb.DeleteVReplicationWorkflowRequest{ Workflow: req.Workflow, } - vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc, s.SQLParser()) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.DeleteVReplicationWorkflow(ctx, tablet.Tablet, deleteReq) if err != nil { @@ -2112,7 +2120,7 @@ func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUp span.Annotate("on_ddl", req.TabletRequest.OnDdl) span.Annotate("state", req.TabletRequest.State) - vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc, s.SQLParser()) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, req.TabletRequest) if err != nil { @@ -2525,7 +2533,7 @@ func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workf if err != nil { return nil, err } - ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, s.SQLParser()) if err != nil { return nil, err } @@ -3114,7 +3122,7 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit } if !journalsExist { ts.Logger().Infof("No previous journals were found. 
Proceeding normally.") - sm, err := BuildStreamMigrator(ctx, ts, cancel) + sm, err := BuildStreamMigrator(ctx, ts, cancel, s.parser) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -3444,7 +3452,7 @@ func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace str if !strings.Contains(vindex.Type, "lookup") { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = s.parser.ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace == "" { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex table name (%s) must be in the form .", vindex.Params["table"]) } diff --git a/go/vt/vtctl/workflow/server_test.go b/go/vt/vtctl/workflow/server_test.go index 85c60336351..e3b33e19dc9 100644 --- a/go/vt/vtctl/workflow/server_test.go +++ b/go/vt/vtctl/workflow/server_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo/topoproto" @@ -142,7 +144,7 @@ func TestCheckReshardingJournalExistsOnTablet(t *testing.T) { }, } - ws := NewServer(nil, tmc) + ws := NewServer(nil, tmc, sqlparser.NewTestParser()) journal, exists, err := ws.CheckReshardingJournalExistsOnTablet(ctx, tt.tablet, 1) if tt.shouldErr { assert.Error(t, err) diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index 75d509614b7..23d382d8062 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -61,14 +61,16 @@ type StreamMigrator struct { templates []*VReplicationStream ts ITrafficSwitcher logger logutil.Logger + parser *sqlparser.Parser } // BuildStreamMigrator creates a new StreamMigrator based on the given // TrafficSwitcher. 
-func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool) (*StreamMigrator, error) { +func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool, parser *sqlparser.Parser) (*StreamMigrator, error) { sm := &StreamMigrator{ ts: ts, logger: ts.Logger(), + parser: parser, } if sm.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { @@ -674,7 +676,7 @@ func (sm *StreamMigrator) templatizeRule(ctx context.Context, rule *binlogdatapb } func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogdatapb.Rule) error { - statement, err := sqlparser.Parse(rule.Filter) + statement, err := sm.parser.Parse(rule.Filter) if err != nil { return err } diff --git a/go/vt/vtctl/workflow/stream_migrator_test.go b/go/vt/vtctl/workflow/stream_migrator_test.go index 04f787eb4d4..38ae10280f7 100644 --- a/go/vt/vtctl/workflow/stream_migrator_test.go +++ b/go/vt/vtctl/workflow/stream_migrator_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -304,7 +306,7 @@ func TestTemplatize(t *testing.T) { }, }, } - ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks") + ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks", sqlparser.NewTestParser()) require.NoError(t, err, "could not create test keyspace %+v", vs) ts := &testTrafficSwitcher{ diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go index 4d1a3c5df4d..f56b721a415 100644 --- a/go/vt/vtctl/workflow/utils.go +++ b/go/vt/vtctl/workflow/utils.go @@ -167,8 +167,8 @@ func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.Materia return nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -189,8 +189,8 @@ func stripTableConstraints(ddl string) (string, error) { return newDDL, nil } -func stripTableForeignKeys(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } diff --git a/go/vt/vtctl/workflow/vexec/query_planner_test.go b/go/vt/vtctl/workflow/vexec/query_planner_test.go index 9199c8a0947..0baa1718b14 100644 --- a/go/vt/vtctl/workflow/vexec/query_planner_test.go +++ b/go/vt/vtctl/workflow/vexec/query_planner_test.go @@ -357,7 +357,7 @@ func TestVReplicationLogQueryPlanner(t *testing.T) { t.Parallel() planner := NewVReplicationLogQueryPlanner(nil, tt.targetStreamIDs) - stmt, err := sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err, "could not parse query %q", tt.query) qp, err := planner.planSelect(stmt.(*sqlparser.Select)) if tt.shouldErr { diff --git a/go/vt/vtctl/workflow/vexec/testutil/query.go b/go/vt/vtctl/workflow/vexec/testutil/query.go index 3988f7a112f..1add74e5b02 100644 --- a/go/vt/vtctl/workflow/vexec/testutil/query.go +++ b/go/vt/vtctl/workflow/vexec/testutil/query.go @@ -41,7 +41,7 @@ func ParsedQueryFromString(t *testing.T, query string) *sqlparser.ParsedQuery { func StatementFromString(t *testing.T, query string) sqlparser.Statement { t.Helper() - stmt, err := sqlparser.Parse(query) + stmt, err 
:= sqlparser.NewTestParser().Parse(query) require.NoError(t, err, "could not parse query %v", query) return stmt diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go index 477b81a1a03..54591b2c306 100644 --- a/go/vt/vtctl/workflow/vexec/vexec.go +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -95,6 +95,8 @@ type VExec struct { // to support running in modes like: // - Execute serially rather than concurrently. // - Only return error if greater than some percentage of the targets fail. + + parser *sqlparser.Parser } // NewVExec returns a new instance suitable for making vexec queries to a given @@ -102,12 +104,13 @@ type VExec struct { // string). The provided topo server is used to look up target tablets for // queries. A given instance will discover targets exactly once for its // lifetime, so to force a refresh, create another instance. -func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient) *VExec { +func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient, parser *sqlparser.Parser) *VExec { return &VExec{ ts: ts, tmc: tmc, keyspace: keyspace, workflow: workflow, + parser: parser, } } @@ -127,7 +130,7 @@ func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.Tabl } } - stmt, err := sqlparser.Parse(query) + stmt, err := vx.parser.Parse(query) if err != nil { return nil, err } @@ -299,6 +302,7 @@ func (vx *VExec) WithWorkflow(workflow string) *VExec { ts: vx.ts, tmc: vx.tmc, primaries: vx.primaries, + parser: vx.parser, workflow: workflow, } } diff --git a/go/vt/vtctld/action_repository.go b/go/vt/vtctld/action_repository.go index 1ea0def22fb..095beb2ae90 100644 --- a/go/vt/vtctld/action_repository.go +++ b/go/vt/vtctld/action_repository.go @@ -23,6 +23,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/acl" @@ -86,17 +88,19 @@ type ActionRepository struct { tabletActions map[string]actionTabletRecord ts *topo.Server collationEnv *collations.Environment + parser *sqlparser.Parser } // NewActionRepository creates and returns a new ActionRepository, // with no actions. 
-func NewActionRepository(ts *topo.Server, collationEnv *collations.Environment) *ActionRepository { +func NewActionRepository(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *ActionRepository { return &ActionRepository{ keyspaceActions: make(map[string]actionKeyspaceMethod), shardActions: make(map[string]actionShardMethod), tabletActions: make(map[string]actionTabletRecord), ts: ts, collationEnv: collationEnv, + parser: parser, } } @@ -129,7 +133,7 @@ func (ar *ActionRepository) ApplyKeyspaceAction(ctx context.Context, actionName, } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action(ctx, wr, keyspace) cancel() if err != nil { @@ -156,7 +160,7 @@ func (ar *ActionRepository) ApplyShardAction(ctx context.Context, actionName, ke } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action(ctx, wr, keyspace, shard) cancel() if err != nil { @@ -190,7 +194,7 @@ func (ar *ActionRepository) ApplyTabletAction(ctx context.Context, actionName st // run the action ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action.method(ctx, wr, tabletAlias) cancel() if err != nil { diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index a4979286727..92778ce83af 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -28,8 +28,6 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/acl" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/log" @@ -180,7 +178,7 @@ func unmarshalRequest(r *http.Request, v any) error { return json.Unmarshal(data, v) } -func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, collationEnv *collations.Environment) { +func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { tabletHealthCache := newTabletHealthCache(ts) tmClient := tmclient.NewTabletManagerClient() @@ -489,7 +487,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, co logstream := logutil.NewMemoryLogger() - wr := wrangler.New(logstream, ts, tmClient, collationEnv) + wr := wrangler.New(logstream, ts, tmClient, actions.collationEnv, actions.parser) err := vtctl.RunCommand(r.Context(), wr, args) if err != nil { resp.Error = err.Error() @@ -525,7 +523,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, co logger := logutil.NewCallbackLogger(func(ev *logutilpb.Event) { w.Write([]byte(logutil.EventString(ev))) }) - wr := wrangler.New(logger, ts, tmClient, collationEnv) + wr := wrangler.New(logger, ts, tmClient, actions.collationEnv, actions.parser) apiCallUUID, err := schema.CreateUUID() if err != nil { @@ -533,7 +531,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, co } requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID) - executor := 
schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0) + executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0, actions.parser) if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil { return fmt.Errorf("error setting DDL strategy: %v", err) } diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index 98b4b72151a..780ff26e6ff 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/servenv/testutils" @@ -49,7 +51,7 @@ func TestAPI(t *testing.T) { cells := []string{"cell1", "cell2"} ts := memorytopo.NewServer(ctx, cells...) defer ts.Close() - actionRepo := NewActionRepository(ts, collations.MySQL8()) + actionRepo := NewActionRepository(ts, collations.MySQL8(), sqlparser.NewTestParser()) server := testutils.HTTPTestServer() defer server.Close() @@ -124,7 +126,7 @@ func TestAPI(t *testing.T) { return "TestTabletAction Result", nil }) - initAPI(ctx, ts, actionRepo, collations.MySQL8()) + initAPI(ctx, ts, actionRepo) // all-tablets response for keyspace/ks1/tablets/ endpoints keyspaceKs1AllTablets := `[ diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index 4c6fe99df29..12df3b27c6a 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -25,6 +25,8 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" @@ -114,7 +116,7 @@ func TestTabletData(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") defer ts.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index 708c44e9059..8093ded1371 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -23,6 +23,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/servenv" @@ -50,8 +52,8 @@ func registerVtctldFlags(fs *pflag.FlagSet) { } // InitVtctld initializes all the vtctld functionality. 
-func InitVtctld(ts *topo.Server, collationEnv *collations.Environment) error { - actionRepo := NewActionRepository(ts, collationEnv) +func InitVtctld(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) error { + actionRepo := NewActionRepository(ts, collationEnv, parser) // keyspace actions actionRepo.RegisterKeyspaceAction("ValidateKeyspace", @@ -128,7 +130,7 @@ func InitVtctld(ts *topo.Server, collationEnv *collations.Environment) error { }) // Serve the REST API - initAPI(context.Background(), ts, actionRepo, collationEnv) + initAPI(context.Background(), ts, actionRepo) // Serve the topology endpoint in the REST API at /topodata initExplorer(ts) diff --git a/go/vt/vtexplain/vtexplain.go b/go/vt/vtexplain/vtexplain.go index fbde759018f..8f7cec502c2 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -148,6 +148,7 @@ type ( globalTabletEnv *tabletEnv collationEnv *collations.Environment + parser *sqlparser.Parser } ) @@ -183,13 +184,13 @@ type TabletActions struct { } // Init sets up the fake execution environment -func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options, collationEnv *collations.Environment) (*VTExplain, error) { +func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options, collationEnv *collations.Environment, parser *sqlparser.Parser) (*VTExplain, error) { // Verify options if opts.ReplicationMode != "ROW" && opts.ReplicationMode != "STATEMENT" { return nil, fmt.Errorf("invalid replication mode \"%s\"", opts.ReplicationMode) } - parsedDDLs, err := parseSchema(sqlSchema, opts) + parsedDDLs, err := parseSchema(sqlSchema, opts, parser) if err != nil { return nil, fmt.Errorf("parseSchema: %v", err) } @@ -204,6 +205,7 @@ func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts Autocommit: true, }, collationEnv: collationEnv, + parser: parser, } vte.setGlobalTabletEnv(tabletEnv) err = vte.initVtgateExecutor(ctx, vSchemaStr, ksShardMapStr, opts) @@ -232,10 +234,10 @@ func (vte *VTExplain) Stop() { } } -func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, error) { +func parseSchema(sqlSchema string, opts *Options, parser *sqlparser.Parser) ([]sqlparser.DDLStatement, error) { parsedDDLs := make([]sqlparser.DDLStatement, 0, 16) for { - sql, rem, err := sqlparser.SplitStatement(sqlSchema) + sql, rem, err := parser.SplitStatement(sqlSchema) sqlSchema = rem if err != nil { return nil, err @@ -250,12 +252,12 @@ func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, err var stmt sqlparser.Statement if opts.StrictDDL { - stmt, err = sqlparser.ParseStrictDDL(sql) + stmt, err = parser.ParseStrictDDL(sql) if err != nil { return nil, err } } else { - stmt, err = sqlparser.Parse(sql) + stmt, err = parser.Parse(sql) if err != nil { log.Errorf("ERROR: failed to parse sql: %s, got error: %v", sql, err) continue @@ -299,7 +301,7 @@ func (vte *VTExplain) Run(sql string) ([]*Explain, error) { sql = s } - sql, rem, err = sqlparser.SplitStatement(sql) + sql, rem, err = vte.parser.SplitStatement(sql) if err != nil { return nil, err } @@ -386,7 +388,7 @@ func (vte *VTExplain) specialHandlingOfSavepoints(q *MysqlQuery) error { return nil } - stmt, err := sqlparser.Parse(q.SQL) + stmt, err := vte.parser.Parse(q.SQL) if err != nil { return err } diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go index b64dba590c6..257b1d38406 100644 --- a/go/vt/vtexplain/vtexplain_test.go +++ 
b/go/vt/vtexplain/vtexplain_test.go @@ -28,6 +28,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" @@ -67,7 +69,7 @@ func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, } opts.ExecutionMode = mode - vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts, collations.MySQL8()) + vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts, collations.MySQL8(), sqlparser.NewTestParser()) require.NoError(t, err, "vtexplain Init error\n%s", string(schema)) return vte } @@ -346,7 +348,7 @@ func TestInit(t *testing.T) { } }` schema := "create table table_missing_primary_vindex (id int primary key)" - _, err := Init(ctx, vschema, schema, "", defaultTestOpts(), collations.MySQL8()) + _, err := Init(ctx, vschema, schema, "", defaultTestOpts(), collations.MySQL8(), sqlparser.NewTestParser()) require.Error(t, err) require.Contains(t, err.Error(), "missing primary col vindex") } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index d9e4c76321a..80994ef9474 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -75,7 +75,7 @@ func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShar var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests queryLogBufferSize := 10 plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) - vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0, vte.collationEnv) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0, vte.collationEnv, vte.parser) vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil @@ -107,7 +107,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS if err != nil { return err } - schema := vindexes.BuildVSchema(&srvVSchema) + schema := vindexes.BuildVSchema(&srvVSchema, vte.parser) for ks, ksSchema := range schema.Keyspaces { if ksSchema.Error != nil { return vterrors.Wrapf(ksSchema.Error, "vschema failed to load on keyspace [%s]", ks) @@ -143,7 +143,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) tablet := vte.healthCheck.AddFakeTablet(vtexplainCell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { - return vte.newTablet(ctx, opts, t, vte.collationEnv) + return vte.newTablet(ctx, opts, t, vte.collationEnv, vte.parser) }) vte.explainTopo.TabletConns[hostname] = tablet.(*explainTablet) vte.explainTopo.KeyspaceShards[ks][shard.Name] = shard diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index a9e09571626..fa01fc319b4 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -104,9 +104,9 @@ type explainTablet struct { var _ queryservice.QueryService = (*explainTablet)(nil) -func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet, collationEnv *collations.Environment) *explainTablet { 
+func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet, collationEnv *collations.Environment, parser *sqlparser.Parser) *explainTablet { db := fakesqldb.New(nil) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, vte.parser) config := tabletenv.NewCurrentConfig() config.TrackSchemaVersions = false @@ -119,7 +119,7 @@ func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatap config.EnableTableGC = false // XXX much of this is cloned from the tabletserver tests - tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias, collationEnv) + tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias, collationEnv, parser) tablet := explainTablet{db: db, tsv: tsv, vte: vte, collationEnv: collationEnv} db.Handler = &tablet @@ -583,7 +583,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { // Parse the select statement to figure out the table and columns // that were referenced so that the synthetic response has the // expected field names and types. - stmt, err := sqlparser.Parse(query) + stmt, err := t.vte.parser.Parse(query) if err != nil { return nil, err } diff --git a/go/vt/vtexplain/vtexplain_vttablet_test.go b/go/vt/vtexplain/vtexplain_vttablet_test.go index 444a2580042..601df4b8e79 100644 --- a/go/vt/vtexplain/vtexplain_vttablet_test.go +++ b/go/vt/vtexplain/vtexplain_vttablet_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -72,7 +74,9 @@ create table t2 ( ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vte, err := Init(ctx, testVSchema, testSchema, "", opts, collations.MySQL8()) + collationEnv := collations.MySQL8() + parser := sqlparser.NewTestParser() + vte, err := Init(ctx, testVSchema, testSchema, "", opts, collationEnv, parser) require.NoError(t, err) defer vte.Stop() @@ -119,8 +123,9 @@ create table test_partitioned ( PARTITION p2018_06_16 VALUES LESS THAN (1529132400) ENGINE = InnoDB, PARTITION p2018_06_17 VALUES LESS THAN (1529218800) ENGINE = InnoDB)*/; ` - - ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}) + collationEnv := collations.MySQL8() + parser := sqlparser.NewTestParser() + ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}, parser) if err != nil { t.Fatalf("parseSchema: %v", err) } @@ -130,14 +135,14 @@ create table test_partitioned ( vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) defer vte.Stop() - tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts(), collations.MySQL8()) + tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts(), collationEnv) vte.setGlobalTabletEnv(tabletEnv) tablet := vte.newTablet(ctx, defaultTestOpts(), &topodatapb.Tablet{ Keyspace: "test_keyspace", Shard: "-80", Alias: &topodatapb.TabletAlias{}, - }, collations.MySQL8()) + }, collationEnv, parser) se := tablet.tsv.SchemaEngine() tables := se.GetSchema() @@ -183,7 +188,7 @@ create table test_partitioned ( func TestErrParseSchema(t *testing.T) { testSchema := `create table t1 like t2` - ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}) + ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}, sqlparser.NewTestParser()) require.NoError(t, err) _, 
err = newTabletEnvironment(ddl, defaultTestOpts(), collations.MySQL8()) diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index aca8bcf1a0c..93c5e97bd89 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -680,7 +680,7 @@ func (cached *OnlineDDL) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(64) } // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace size += cached.Keyspace.CachedSize(true) @@ -696,8 +696,6 @@ func (cached *OnlineDDL) CachedSize(alloc bool) int64 { if cc, ok := cached.TargetDestination.(cachedObject); ok { size += cc.CachedSize(true) } - // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment - size += cached.CollationEnv.CachedSize(true) return size } func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { diff --git a/go/vt/vtgate/engine/ddl_test.go b/go/vt/vtgate/engine/ddl_test.go index 5d6e0d85a78..3f7ccb75f70 100644 --- a/go/vt/vtgate/engine/ddl_test.go +++ b/go/vt/vtgate/engine/ddl_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -34,9 +33,7 @@ func TestDDL(t *testing.T) { Table: sqlparser.NewTableName("a"), }, DirectDDLEnabled: true, - OnlineDDL: &OnlineDDL{ - CollationEnv: collations.MySQL8(), - }, + OnlineDDL: &OnlineDDL{}, NormalDDL: &Send{ Keyspace: &vindexes.Keyspace{ Name: "ks", diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index 03d9e80a0ab..b8e1c911ebf 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -137,6 +137,11 @@ func (t *noopVCursor) CollationEnv() *collations.Environment { return collations.MySQL8() } +// SQLParser implements VCursor +func (t *noopVCursor) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + func (t *noopVCursor) TimeZone() *time.Location { return nil } @@ -417,6 +422,8 @@ type loggingVCursor struct { ksShardMap map[string][]string shardSession []*srvtopo.ResolvedShard + + parser *sqlparser.Parser } func (f *loggingVCursor) HasCreatedTempTable() { @@ -803,13 +810,21 @@ func (f *loggingVCursor) nextResult() (*sqltypes.Result, error) { } func (f *loggingVCursor) CanUseSetVar() bool { - useSetVar := sqlparser.IsMySQL80AndAbove() && !f.disableSetVar + useSetVar := f.SQLParser().IsMySQL80AndAbove() && !f.disableSetVar if useSetVar { f.log = append(f.log, "SET_VAR can be used") } return useSetVar } +// SQLParser implements VCursor +func (t *loggingVCursor) SQLParser() *sqlparser.Parser { + if t.parser == nil { + return sqlparser.NewTestParser() + } + return t.parser +} + func (t *noopVCursor) VExplainLogging() {} func (t *noopVCursor) DisableLogging() {} func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry { diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 217492a529f..e870ffa18c0 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -194,7 +194,7 @@ func TestInsertShardedSimple(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -336,7 +336,7 @@ func TestInsertShardWithONDuplicateKey(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + 
vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -489,7 +489,7 @@ func TestInsertShardedFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -539,7 +539,7 @@ func TestInsertShardedGenerate(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -658,7 +658,7 @@ func TestInsertShardedOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -768,7 +768,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -845,7 +845,7 @@ func TestInsertShardedGeo(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -951,7 +951,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1107,7 +1107,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1205,7 +1205,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1333,7 +1333,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1439,7 +1439,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1516,7 +1516,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1630,7 +1630,7 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1677,7 +1677,7 @@ func TestInsertSelectSimple(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -1760,7 +1760,7 @@ func TestInsertSelectOwned(t *testing.T) { Name: "onecol", Columns: []string{"c3"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb 
:= &Route{ @@ -1861,7 +1861,7 @@ func TestInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -1953,7 +1953,7 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2050,7 +2050,7 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2137,7 +2137,7 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2234,7 +2234,7 @@ func TestInsertSelectUnowned(t *testing.T) { Name: "onecol", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2338,7 +2338,7 @@ func TestInsertSelectShardingCases(t *testing.T) { "uks2": {Tables: map[string]*vschemapb.Table{"u2": {}}}, }} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) sks1 := vs.Keyspaces["sks1"] sks2 := vs.Keyspaces["sks2"] uks1 := vs.Keyspaces["uks1"] diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index d93e331430a..62126da4d08 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -42,8 +41,6 @@ type OnlineDDL struct { // TargetDestination specifies an explicit target destination to send the query to. 
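In the engine hunks that continue below, the OnlineDDL primitive drops its own *collations.Environment field; it (like RevertMigration and VExplain) now asks the VCursor for both the collation environment and the parser at execution time, through CollationEnv() and the new SQLParser() method on the VCursor interface. A minimal sketch of that access pattern, assuming the usual engine.VCursor passed to a primitive's TryExecute; the helper and package names below are illustrative, not part of the patch:

	package example

	import (
		"vitess.io/vitess/go/vt/sqlparser"
		"vitess.io/vitess/go/vt/vtgate/engine"
	)

	// execTimeState shows a primitive reaching per-environment state through
	// the VCursor at execution time instead of storing it on the plan.
	func execTimeState(vcursor engine.VCursor, sql string) (sqlparser.Statement, uint32, error) {
		charset := uint32(vcursor.CollationEnv().DefaultConnectionCharset())
		stmt, err := vcursor.SQLParser().Parse(sql)
		if err != nil {
			return nil, 0, err
		}
		return stmt, charset, nil
	}
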
TargetDestination key.Destination - CollationEnv *collations.Environment - noTxNeeded noInputs @@ -81,7 +78,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma { Name: "uuid", Type: sqltypes.VarChar, - Charset: uint32(v.CollationEnv.DefaultConnectionCharset()), + Charset: uint32(vcursor.CollationEnv().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{}, @@ -92,7 +89,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma migrationContext = fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()) } onlineDDLs, err := schema.NewOnlineDDLs(v.GetKeyspaceName(), v.SQL, v.DDL, - v.DDLStrategySetting, migrationContext, "", + v.DDLStrategySetting, migrationContext, "", vcursor.SQLParser(), ) if err != nil { return result, err diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index af6ccceb9ff..a9627d358bc 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -88,6 +88,7 @@ type ( ConnCollation() collations.ID CollationEnv() *collations.Environment + SQLParser() *sqlparser.Parser TimeZone() *time.Location SQLMode() string diff --git a/go/vt/vtgate/engine/revert_migration.go b/go/vt/vtgate/engine/revert_migration.go index e7237d01da4..23275ddd043 100644 --- a/go/vt/vtgate/engine/revert_migration.go +++ b/go/vt/vtgate/engine/revert_migration.go @@ -88,7 +88,7 @@ func (v *RevertMigration) TryExecute(ctx context.Context, vcursor VCursor, bindV return nil, err } ddlStrategySetting.Strategy = schema.DDLStrategyOnline // and we keep the options as they were - onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "") + onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "", vcursor.SQLParser()) if err != nil { return result, err } diff --git a/go/vt/vtgate/engine/set_test.go b/go/vt/vtgate/engine/set_test.go index dbce162ff87..0677ee40bd8 100644 --- a/go/vt/vtgate/engine/set_test.go +++ b/go/vt/vtgate/engine/set_test.go @@ -363,7 +363,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed additional - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -383,7 +383,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed less - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -420,7 +420,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -459,7 +459,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -479,7 +479,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change to empty - non empty orig - MySQL80 - should use reserved conn", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -499,7 +499,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80 - SET_VAR disabled", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: 
"sql_mode", @@ -520,7 +520,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: "sql_mode set an unsupported mode", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -540,7 +540,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: "default_week_format change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "default_week_format", @@ -565,23 +565,22 @@ func TestSetTable(t *testing.T) { tc.input = &SingleRow{} } - oldMySQLVersion := sqlparser.GetParserVersion() - defer func() { sqlparser.SetParserVersion(oldMySQLVersion) }() - if tc.mysqlVersion != "" { - sqlparser.SetParserVersion(tc.mysqlVersion) - } - set := &Set{ Ops: tc.setOps, Input: tc.input, } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: tc.mysqlVersion, + }) + require.NoError(t, err) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, results: tc.qr, multiShardErrs: []error{tc.execErr}, disableSetVar: tc.disableSetVar, + parser: parser, } - _, err := set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + _, err = set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) if tc.expectedError == "" { require.NoError(t, err) } else { diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index e2ee9d553d1..9d583cdcfcf 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -21,6 +21,7 @@ import ( "errors" "testing" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -1023,7 +1024,7 @@ func buildTestVSchema() *vindexes.VSchema { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) return vs } diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go index ad540f96c9c..23b8ebc2139 100644 --- a/go/vt/vtgate/engine/vexplain.go +++ b/go/vt/vtgate/engine/vexplain.go @@ -122,7 +122,7 @@ func (v *VExplain) convertToVExplainAllResult(ctx context.Context, vcursor VCurs explainQuery := fmt.Sprintf("explain format = json %v", entry.Query) // We rely on the parser to see if the query we have is explainable or not // If we get an error in parsing then we can't execute explain on the given query, and we skip it - _, err := sqlparser.Parse(explainQuery) + _, err := vcursor.SQLParser().Parse(explainQuery) if err != nil { continue } diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index b4628e2c2da..a0c29e1510f 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -98,7 +98,7 @@ func TestCompilerReference(t *testing.T) { defer func() { evalengine.SystemTime = time.Now }() track := NewTracker() - + parser := sqlparser.NewTestParser() for _, tc := range testcases.Cases { t.Run(tc.Name(), func(t *testing.T) { var supported, total int @@ -107,7 +107,7 @@ func TestCompilerReference(t *testing.T) { tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - stmt, err := sqlparser.ParseExpr(query) + stmt, err := parser.ParseExpr(query) if err != nil { // no need to test un-parseable queries return @@ -577,10 +577,10 @@ func TestCompilerSingle(t *testing.T) { } tz, _ := time.LoadLocation("Europe/Madrid") - + parser := sqlparser.NewTestParser() for _, tc := range testCases { 
t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -657,9 +657,10 @@ func TestBindVarLiteral(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -722,9 +723,10 @@ func TestCompilerNonConstant(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go index 9eecd89f11a..8e401fd19f6 100644 --- a/go/vt/vtgate/evalengine/integration/fuzz_test.go +++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go @@ -131,7 +131,7 @@ func errorsMatch(remote, local error) bool { } func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields []*querypb.Field) (evalengine.EvalResult, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { return evalengine.EvalResult{}, err } @@ -233,7 +233,7 @@ func TestGenerateFuzzCases(t *testing.T) { var start = time.Now() for len(failures) < fuzzMaxFailures { query := "SELECT " + gen.expr() - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatal(err) } diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go index 08bdac28f1e..eac881ba850 100644 --- a/go/vt/vtgate/evalengine/mysql_test.go +++ b/go/vt/vtgate/evalengine/mysql_test.go @@ -62,7 +62,7 @@ func knownBadQuery(e Expr) bool { var errKnownBadQuery = errors.New("this query is known to give bad results in MySQL") func convert(t *testing.T, query string, simplify bool) (Expr, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatalf("failed to parse '%s': %v", query, err) } diff --git a/go/vt/vtgate/evalengine/perf_test.go b/go/vt/vtgate/evalengine/perf_test.go index 0f14397d5de..b1ac1536822 100644 --- a/go/vt/vtgate/evalengine/perf_test.go +++ b/go/vt/vtgate/evalengine/perf_test.go @@ -22,8 +22,9 @@ func BenchmarkCompilerExpressions(b *testing.B) { {"comparison_f", "column0 = 12", []sqltypes.Value{sqltypes.NewFloat64(420.0)}}, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { b.Fatal(err) } diff --git a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go index b721bf3597f..ecf569fccea 100644 --- a/go/vt/vtgate/evalengine/translate_test.go +++ b/go/vt/vtgate/evalengine/translate_test.go @@ -115,7 +115,7 @@ func TestTranslateSimplification(t *testing.T) { for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - stmt, err := sqlparser.Parse("select " + tc.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + tc.expression) if err != nil { t.Fatal(err) } @@ -300,7 +300,7 @@ func TestEvaluate(t *testing.T) { for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + 
stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr sqltypesExpr, err := Translate(astExpr, &Config{ @@ -348,7 +348,7 @@ func TestEvaluateTuple(t *testing.T) { for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr collationEnv := collations.MySQL8() @@ -389,7 +389,7 @@ func TestTranslationFailures(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + testcase.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr _, err = Translate(astExpr, &Config{ @@ -425,7 +425,7 @@ func TestCardinalityWithBindVariables(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.expr, func(t *testing.T) { err := func() error { - stmt, err := sqlparser.Parse("select " + testcase.expr) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expr) if err != nil { return err } diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index b3a64911d56..e1ea404cfa9 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -123,7 +123,8 @@ type Executor struct { warmingReadsPercent int warmingReadsChannel chan bool - collationEnv *collations.Environment + collEnv *collations.Environment + parser *sqlparser.Parser } var executorOnce sync.Once @@ -155,6 +156,7 @@ func NewExecutor( pv plancontext.PlannerVersion, warmingReadsPercent int, collationEnv *collations.Environment, + parser *sqlparser.Parser, ) *Executor { e := &Executor{ serv: serv, @@ -171,7 +173,8 @@ func NewExecutor( plans: plans, warmingReadsPercent: warmingReadsPercent, warmingReadsChannel: make(chan bool, warmingReadsConcurrency), - collationEnv: collationEnv, + collEnv: collationEnv, + parser: parser, } vschemaacl.Init() @@ -181,6 +184,7 @@ func NewExecutor( serv: serv, cell: cell, schema: e.schemaTracker, + parser: parser, } serv.WatchSrvVSchema(ctx, cell, e.vm.VSchemaUpdate) @@ -227,7 +231,7 @@ func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn } if result != nil && len(result.Rows) > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.parser.RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } @@ -361,7 +365,7 @@ func (e *Executor) StreamExecute( saveSessionStats(safeSession, srr.stmtType, srr.rowsAffected, srr.insertID, srr.rowsReturned, err) if srr.rowsReturned > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.parser.RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } @@ -503,14 +507,14 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars bindVars[key] = sqltypes.StringBindVariable(mysqlSocketPath()) default: if value, hasSysVar := session.SystemVariables[sysVar]; hasSysVar { - expr, err := sqlparser.ParseExpr(value) + expr, err := e.parser.ParseExpr(value) if err != nil { return err } evalExpr, err := evalengine.Translate(expr, 
&evalengine.Config{ Collation: vcursor.collation, - CollationEnv: vcursor.collationEnv, + CollationEnv: e.collEnv, SQLMode: evalengine.ParseSQLMode(vcursor.SQLMode()), }) if err != nil { @@ -1343,9 +1347,9 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) { query, comments := sqlparser.SplitMarginComments(sql) - vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv, e.collationEnv) + vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return nil, err } @@ -1380,8 +1384,8 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return qr.Fields, err } -func parseAndValidateQuery(query string) (sqlparser.Statement, *sqlparser.ReservedVars, error) { - stmt, reserved, err := sqlparser.Parse2(query) +func parseAndValidateQuery(query string, parser *sqlparser.Parser) (sqlparser.Statement, *sqlparser.ReservedVars, error) { + stmt, reserved, err := parser.Parse2(query) if err != nil { return nil, nil, err } @@ -1516,7 +1520,7 @@ func (e *Executor) ReleaseLock(ctx context.Context, session *SafeSession) error // planPrepareStmt implements the IExecutor interface func (e *Executor) planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) { - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return nil, nil, err } @@ -1549,3 +1553,11 @@ func (e *Executor) Close() { topo.Close() e.plans.Close() } + +func (e *Executor) collationEnv() *collations.Environment { + return e.collEnv +} + +func (e *Executor) sqlparser() *sqlparser.Parser { + return e.parser +} diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 961e6e32eca..4ef598d2e61 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" @@ -532,7 +532,7 @@ func TestUpdateMultiOwned(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult( @@ -1469,7 +1469,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) _, err := executorExecSession(ctx, executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) require.NoError(t, err) @@ -2268,7 +2268,7 @@ func TestInsertBadAutoInc(t *testing.T) { } } ` - executor, _, _, _, ctx := createCustomExecutor(t, vschema) + executor, _, _, _, ctx 
:= createCustomExecutor(t, vschema, config.DefaultMySQLVersion) // If auto inc table cannot be found, the table should not be added to vschema. session := &vtgatepb.Session{ diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index ab149fdb26d..831e133770a 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -26,29 +26,25 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/logstats" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/cache/theine" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -186,7 +182,7 @@ func createExecutorEnvCallback(t testing.TB, eachShard func(shard, ks string, ta // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} @@ -214,7 +210,7 @@ func createExecutorEnv(t testing.TB) (executor *Executor, sbc1, sbc2, sbclookup return } -func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { +func createCustomExecutor(t testing.TB, vschema string, mysqlVersion string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { var cancel context.CancelFunc ctx, cancel = context.WithCancel(context.Background()) cell := "aa" @@ -233,7 +229,10 @@ func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: mysqlVersion}) + require.NoError(t, err) + collationEnv := collations.NewEnvironment(mysqlVersion) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collationEnv, parser) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -270,7 +269,7 @@ func 
createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqlty sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -295,7 +294,7 @@ func createExecutorEnvWithPrimaryReplicaConn(t testing.TB, ctx context.Context, replica = hc.AddTestTablet(cell, "0-replica", 1, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent, collations.MySQL8()) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index f83b3965494..af40c0e17b3 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -29,11 +29,10 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/vt/sqlparser" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -160,10 +159,8 @@ func TestSelectDBA(t *testing.T) { } func TestSystemVariablesMySQLBelow80(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "5.7.0") executor.normalize = true - - sqlparser.SetParserVersion("57000") setVarEnabled = true session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) @@ -196,10 +193,9 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { } func TestSystemVariablesWithSetVarDisabled(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - sqlparser.SetParserVersion("80000") setVarEnabled = false defer func() { setVarEnabled = true @@ -234,11 +230,9 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { } func TestSetSystemVariablesTx(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.1") executor.normalize = true - sqlparser.SetParserVersion("80001") - session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) _, err := executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) @@ -285,8 +279,6 @@ func TestSetSystemVariables(t *testing.T) { executor, _, _, lookup, _ := createExecutorEnv(t) executor.normalize = true - 
sqlparser.SetParserVersion("80001") - session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}}) // Set @@sql_mode and execute a select statement. We should have SET_VAR in the select statement @@ -1563,7 +1555,7 @@ func TestStreamSelectIN(t *testing.T) { func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) ex.SetQueryLogger(queryLogger) return ex } @@ -3189,7 +3181,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) defer executor.Close() // some sleep for all goroutines to start @@ -4124,7 +4116,7 @@ func TestSelectCFC(t *testing.T) { func TestSelectView(t *testing.T) { executor, sbc, _, _, _ := createExecutorEnv(t) // add the view to local vschema - err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id") + err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", executor.vm.parser) require.NoError(t, err) executor.normalize = true diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 5377f72c66b..5e66899db44 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -21,8 +21,6 @@ import ( "testing" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sqlparser" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/test/utils" @@ -507,14 +505,9 @@ func createMap(keys []string, values []any) map[string]*querypb.BindVariable { } func TestSetVar(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - sqlparser.SetParserVersion("80000") - defer func() { - sqlparser.SetParserVersion(oldVersion) - }() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( @@ -551,14 +544,9 @@ func TestSetVar(t *testing.T) { } func TestSetVarShowVariables(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - sqlparser.SetParserVersion("80000") - defer func() { - sqlparser.SetParserVersion(oldVersion) - }() session := 
NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{ diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index aa55ddcdb31..076225b158c 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -21,19 +21,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/discovery" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/sqltypes" _ "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -69,7 +68,7 @@ func TestStreamSQLSharded(t *testing.T) { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8()) + executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) defer executor.Close() diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 7b3f159a5e7..b0ab47ebed3 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -103,7 +103,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { for _, test := range testCases { sbclookup.SetResults([]*sqltypes.Result{result}) - stmt, err := sqlparser.Parse(test.query) + stmt, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = executor.Execute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) @@ -1627,8 +1627,8 @@ var pv = querypb.ExecuteOptions_Gen4 func TestGetPlanUnnormalized(t *testing.T) { r, _, _, _, ctx := createExecutorEnv(t) - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) @@ -1697,7 +1697,7 @@ func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcur Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}}, } - stmt, reservedVars, err := 
parseAndValidateQuery(sql) + stmt, reservedVars, err := parseAndValidateQuery(sql, sqlparser.NewTestParser()) require.NoError(t, err) plan, err := e.getPlan(context.Background(), vcursor, sql, stmt, comments, bindVars, reservedVars /* normalize */, e.normalize, logStats) require.NoError(t, err) @@ -1711,7 +1711,7 @@ func TestGetPlanCacheUnnormalized(t *testing.T) { t.Run("Cache", func(t *testing.T) { r, _, _, _, ctx := createExecutorEnv(t) - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) @@ -1735,7 +1735,7 @@ func TestGetPlanCacheUnnormalized(t *testing.T) { // Skip cache using directive r, _, _, _, ctx := createExecutorEnv(t) - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) @@ -1746,12 +1746,12 @@ func TestGetPlanCacheUnnormalized(t *testing.T) { assertCacheSize(t, r.plans, 1) // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) assertCacheSize(t, r.plans, 2) // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) assertCacheSize(t, r.plans, 2) }) @@ -1761,7 +1761,7 @@ func TestGetPlanCacheNormalized(t *testing.T) { t.Run("Cache", func(t *testing.T) { r, _, _, _, ctx := createExecutorEnv(t) r.normalize = true - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, 
collations.MySQL8()) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) @@ -1778,7 +1778,7 @@ func TestGetPlanCacheNormalized(t *testing.T) { // Skip cache using directive r, _, _, _, ctx := createExecutorEnv(t) r.normalize = true - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) @@ -1789,12 +1789,12 @@ func TestGetPlanCacheNormalized(t *testing.T) { assertCacheSize(t, r.plans, 1) // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) assertCacheSize(t, r.plans, 2) // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) assertCacheSize(t, r.plans, 2) }) @@ -1804,8 +1804,8 @@ func TestGetPlanNormalized(t *testing.T) { r, _, _, _, ctx := createExecutorEnv(t) r.normalize = true - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := 
"select * from music_user_map where id = 1" query2 := "select * from music_user_map where id = 2" @@ -1862,10 +1862,10 @@ func TestGetPlanPriority(t *testing.T) { r.normalize = true logStats := logstats.NewLogStats(ctx, "Test", "", "", nil) - vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv, collations.MySQL8()) + vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) assert.NoError(t, err) - stmt, err := sqlparser.Parse(testCase.sql) + stmt, err := sqlparser.NewTestParser().Parse(testCase.sql) assert.NoError(t, err) crticalityFromStatement, _ := sqlparser.GetPriorityFromStatement(stmt) diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 65a29f62e12..657a1792c26 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -80,7 +80,7 @@ func (e *Executor) newExecute( query, comments := sqlparser.SplitMarginComments(sql) // 2: Parse and Validate query - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return err } @@ -97,7 +97,7 @@ func (e *Executor) newExecute( } } - vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv, e.collationEnv) + vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) if err != nil { return err } diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index 4c1bfc0c547..1a982a78489 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -70,7 +70,7 @@ func singleTable(ks, tbl string) string { // TestBuilder builds a plan for a query based on the specified vschema. 
// This method is only used from tests func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) { - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := vschema.SQLParser().Parse2(query) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index e0555fbf6b5..fe5ebeb0889 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -2,6 +2,7 @@ package planbuilder import ( "context" + "errors" "fmt" "vitess.io/vitess/go/vt/key" @@ -147,7 +148,6 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt TargetDestination: destination, DDL: ddlStatement, SQL: query, - CollationEnv: vschema.CollationEnv(), }, nil } @@ -173,7 +173,8 @@ func findTableDestinationAndKeyspace(vschema plancontext.VSchema, ddlStatement s var err error table, _, _, _, destination, err = vschema.FindTableOrVindex(ddlStatement.GetTable()) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -313,7 +314,8 @@ func buildDropTable(vschema plancontext.VSchema, ddlStatement sqlparser.DDLState table, _, _, _, destinationTab, err = vschema.FindTableOrVindex(tab) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -356,7 +358,8 @@ func buildRenameTable(vschema plancontext.VSchema, renameTable *sqlparser.Rename table, _, _, _, destinationFrom, err = vschema.FindTableOrVindex(tabPair.FromTable) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } diff --git a/go/vt/vtgate/planbuilder/expression_converter_test.go b/go/vt/vtgate/planbuilder/expression_converter_test.go index 5cd38685ac2..5c65c9893b2 100644 --- a/go/vt/vtgate/planbuilder/expression_converter_test.go +++ b/go/vt/vtgate/planbuilder/expression_converter_test.go @@ -46,7 +46,7 @@ func TestConversion(t *testing.T) { for _, tc := range queries { t.Run(tc.expressionsIn, func(t *testing.T) { - statement, err := sqlparser.Parse("select " + tc.expressionsIn) + statement, err := sqlparser.NewTestParser().Parse("select " + tc.expressionsIn) require.NoError(t, err) slct := statement.(*sqlparser.Select) exprs := extract(slct.SelectExprs) diff --git a/go/vt/vtgate/planbuilder/operators/fuzz.go b/go/vt/vtgate/planbuilder/operators/fuzz.go index 6ee6b0bab83..c92810e3ae8 100644 --- a/go/vt/vtgate/planbuilder/operators/fuzz.go +++ b/go/vt/vtgate/planbuilder/operators/fuzz.go @@ -30,7 +30,7 @@ func FuzzAnalyse(data []byte) int { if err != nil { return 0 } - tree, err := sqlparser.Parse(query) + tree, err := sqlparser.NewTestParser().Parse(query) if err != nil { return -1 } diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 517b169bcf8..4495efeab3c 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -79,7 +79,7 @@ func TestQP(t *testing.T) { ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - stmt, err := sqlparser.Parse(tcase.sql) + stmt, err 
:= sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) sel := stmt.(*sqlparser.Select) @@ -193,7 +193,7 @@ func TestQPSimplifiedExpr(t *testing.T) { for _, tc := range testCases { t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) + ast, err := sqlparser.NewTestParser().Parse(tc.query) require.NoError(t, err) sel := ast.(*sqlparser.Select) _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index 247777117b5..6ebd71dcf1b 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -530,7 +530,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch if err != nil { t.Fatal(err) } - vschema := vindexes.BuildVSchema(formal) + vschema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) if err != nil { t.Fatal(err) } @@ -541,9 +541,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch // adding view in user keyspace if ks.Keyspace.Name == "user" { - if err = vschema.AddView(ks.Keyspace.Name, - "user_details_view", - "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id"); err != nil { + if err = vschema.AddView(ks.Keyspace.Name, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", sqlparser.NewTestParser()); err != nil { t.Fatal(err) } } @@ -566,7 +564,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ @@ -732,7 +730,7 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) { recover() }() - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) if err != nil { return } diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go index 958a9644a60..d6cb26c45eb 100644 --- a/go/vt/vtgate/planbuilder/plancontext/vschema.go +++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go @@ -92,6 +92,9 @@ type VSchema interface { // StorePrepareData stores the prepared data in the session. StorePrepareData(name string, v *vtgatepb.PrepareData) + + // SQLParser returns the proper sqlparser instance with the right version. 
+ SQLParser() *sqlparser.Parser } // PlannerNameToVersion returns the numerical representation of the planner diff --git a/go/vt/vtgate/planbuilder/planner_test.go b/go/vt/vtgate/planbuilder/planner_test.go index 38c579502fe..2601615522f 100644 --- a/go/vt/vtgate/planbuilder/planner_test.go +++ b/go/vt/vtgate/planbuilder/planner_test.go @@ -58,7 +58,7 @@ func TestBindingSubquery(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.query, func(t *testing.T) { - parse, err := sqlparser.Parse(testcase.query) + parse, err := sqlparser.NewTestParser().Parse(testcase.query) require.NoError(t, err) selStmt := parse.(*sqlparser.Select) semTable, err := semantics.Analyze(selStmt, "d", &semantics.FakeSI{ diff --git a/go/vt/vtgate/planbuilder/rewrite_test.go b/go/vt/vtgate/planbuilder/rewrite_test.go index 292c94f448a..87c8985fd63 100644 --- a/go/vt/vtgate/planbuilder/rewrite_test.go +++ b/go/vt/vtgate/planbuilder/rewrite_test.go @@ -82,7 +82,7 @@ func TestHavingRewrite(t *testing.T) { } func prepTest(t *testing.T, sql string) (*semantics.SemTable, *sqlparser.ReservedVars, *sqlparser.Select) { - ast, vars, err := sqlparser.Parse2(sql) + ast, vars, err := sqlparser.NewTestParser().Parse2(sql) require.NoError(t, err) sel, isSelectStatement := ast.(*sqlparser.Select) diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index 63f6d0ea612..c03ab8c8801 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -73,7 +73,7 @@ func (rb *route) Wireup(ctx *plancontext.PlanningContext) error { } query, args := planableVindex.Query() - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := ctx.VSchema.SQLParser().Parse2(query) if err != nil { return err } diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 302215816c5..a94e3c1ae53 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -130,7 +130,7 @@ func buildSQLCalcFoundRowsPlan( return nil, nil, err } - statement2, reserved2, err := sqlparser.Parse2(originalQuery) + statement2, reserved2, err := vschema.SQLParser().Parse2(originalQuery) if err != nil { return nil, nil, err } diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 8944b9e8f30..33c0812a6cb 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -83,7 +83,7 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult } setOps = append(setOps, setOp) case sqlparser.NextTxScope, sqlparser.SessionScope: - planFunc, err := sysvarPlanningFuncs.Get(expr, vschema.CollationEnv()) + planFunc, err := sysvarPlanningFuncs.Get(expr, vschema.CollationEnv(), vschema.SQLParser()) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go index 169a916649d..f68622e7a27 100644 --- a/go/vt/vtgate/planbuilder/show_test.go +++ b/go/vt/vtgate/planbuilder/show_test.go @@ -50,7 +50,7 @@ func TestBuildDBPlan(t *testing.T) { for _, s := range testCases { t.Run(s.query, func(t *testing.T) { - parserOut, err := sqlparser.Parse(s.query) + parserOut, err := sqlparser.NewTestParser().Parse(s.query) require.NoError(t, err) show := parserOut.(*sqlparser.Show) @@ -110,7 +110,7 @@ func TestGenerateCharsetRows(t *testing.T) { for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - stmt, err := sqlparser.Parse(tc.input) + stmt, err := sqlparser.NewTestParser().Parse(tc.input) require.NoError(t, err) 
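The planbuilder hunks above follow the same thread: vindexes.BuildVSchema and VSchema.AddView now take the parser explicitly, and the plancontext.VSchema interface gains SQLParser(), so planning code that re-parses generated SQL (route.go, select.go, TestBuilder) uses the session's version-aware parser instead of the sqlparser package globals. A minimal sketch of the interface usage, assuming a value that satisfies the updated plancontext.VSchema; the helper and package names are illustrative:

	package example

	import (
		"vitess.io/vitess/go/vt/sqlparser"
		"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
	)

	// reparse re-parses a generated query with the parser carried by the
	// planning VSchema, mirroring the pattern in route.go and select.go.
	func reparse(vschema plancontext.VSchema, query string) (sqlparser.Statement, error) {
		stmt, _, err := vschema.SQLParser().Parse2(query)
		if err != nil {
			return nil, err
		}
		return stmt, nil
	}
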
match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic) filter := match.Filter diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 56d310d2949..e13fef7ae70 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -41,7 +41,7 @@ func TestSimplifyBuggyQuery(t *testing.T) { V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) @@ -63,7 +63,7 @@ func TestSimplifyPanic(t *testing.T) { V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) @@ -88,7 +88,7 @@ func TestUnsupportedFile(t *testing.T) { for _, tcase := range readJSONTests("unsupported_cases.txt") { t.Run(tcase.Query, func(t *testing.T) { log.Errorf("unsupported_cases.txt - %s", tcase.Query) - stmt, reserved, err := sqlparser.Parse2(tcase.Query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(tcase.Query) require.NoError(t, err) _, ok := stmt.(sqlparser.SelectStatement) if !ok { @@ -104,7 +104,7 @@ func TestUnsupportedFile(t *testing.T) { reservedVars := sqlparser.NewReservedVars("vtg", reserved) ast := rewritten.AST origQuery := sqlparser.String(ast) - stmt, _, _ = sqlparser.Parse2(tcase.Query) + stmt, _, _ = sqlparser.NewTestParser().Parse2(tcase.Query) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), vschema.CurrentDb(), @@ -125,7 +125,7 @@ func TestUnsupportedFile(t *testing.T) { } func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err) } @@ -164,7 +164,7 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema * return false } - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err.Error()) } diff --git a/go/vt/vtgate/planbuilder/system_variables.go b/go/vt/vtgate/planbuilder/system_variables.go index ff2fbb6f55c..454445eeb32 100644 --- a/go/vt/vtgate/planbuilder/system_variables.go +++ b/go/vt/vtgate/planbuilder/system_variables.go @@ -31,6 +31,7 @@ type sysvarPlanCache struct { funcs map[string]planFunc once sync.Once collationEnv *collations.Environment + parser *sqlparser.Parser } func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVariable, f func(setting) planFunc) { @@ -55,7 +56,7 @@ func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVaria } func (pc *sysvarPlanCache) parseAndBuildDefaultValue(sysvar sysvars.SystemVariable) evalengine.Expr { - stmt, err := sqlparser.Parse(fmt.Sprintf("select %s", sysvar.Default)) + stmt, err := pc.parser.Parse(fmt.Sprintf("select %s", sysvar.Default)) if err 
!= nil { panic(fmt.Sprintf("bug in set plan init - default value for %s not parsable: %s", sysvar.Name, sysvar.Default)) } @@ -71,9 +72,10 @@ func (pc *sysvarPlanCache) parseAndBuildDefaultValue(sysvar sysvars.SystemVariab return def } -func (pc *sysvarPlanCache) init(collationEnv *collations.Environment) { +func (pc *sysvarPlanCache) init(collationEnv *collations.Environment, parser *sqlparser.Parser) { pc.once.Do(func() { pc.collationEnv = collationEnv + pc.parser = parser pc.funcs = make(map[string]planFunc) pc.initForSettings(sysvars.ReadOnly, buildSetOpReadOnly) pc.initForSettings(sysvars.IgnoreThese, buildSetOpIgnore) @@ -86,8 +88,8 @@ func (pc *sysvarPlanCache) init(collationEnv *collations.Environment) { var sysvarPlanningFuncs sysvarPlanCache -func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr, collationEnv *collations.Environment) (planFunc, error) { - pc.init(collationEnv) +func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr, collationEnv *collations.Environment, parser *sqlparser.Parser) (planFunc, error) { + pc.init(collationEnv, parser) pf, ok := pc.funcs[expr.Var.Name.Lowered()] if !ok { return nil, vterrors.VT05006(sqlparser.String(expr)) diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index c3a67b1d7e1..0508b7029ba 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -420,6 +420,10 @@ func (vh *vtgateHandler) KillQuery(connectionID uint32) error { return nil } +func (vh *vtgateHandler) SQLParser() *sqlparser.Parser { + return vh.vtg.executor.parser +} + func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { session, _ := c.ClientData.(*vtgatepb.Session) if session == nil { @@ -531,6 +535,7 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { mysqlKeepAlivePeriod, mysqlServerFlushDelay, servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) @@ -577,6 +582,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlKeepAlivePeriod, mysqlServerFlushDelay, servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) switch err := err.(type) { @@ -611,6 +617,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlKeepAlivePeriod, mysqlServerFlushDelay, servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) return listener, listenerErr default: diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 21375050a4d..89786766e2f 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -30,20 +30,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/utils" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/trace" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tlstest" ) type testHandler struct { mysql.UnimplementedHandler lastConn *mysql.Conn + parser *sqlparser.Parser } func (th *testHandler) NewConnection(c *mysql.Conn) { @@ -83,6 +83,10 @@ func (th *testHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } +func (th *testHandler) SQLParser() *sqlparser.Parser { + return th.parser +} + func TestConnectionUnixSocket(t *testing.T) { th := &testHandler{} @@ -348,7 +352,7 @@ func 
TestGracefulShutdown(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess") + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess", 0) require.NoError(t, err) defer listener.Close() @@ -378,7 +382,7 @@ func TestGracefulShutdownWithTransaction(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess") + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess", 0) require.NoError(t, err) defer listener.Close() diff --git a/go/vt/vtgate/querylogz.go b/go/vt/vtgate/querylogz.go index acfb970df5a..0e8c8044515 100644 --- a/go/vt/vtgate/querylogz.go +++ b/go/vt/vtgate/querylogz.go @@ -57,7 +57,7 @@ var ( querylogzFuncMap = template.FuncMap{ "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, "cssWrappable": logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, + "truncateQuery": sqlparser.NewTestParser().TruncateForUI, "unquote": func(s string) string { return strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index e546fc68c6f..93bf347eeff 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -145,7 +144,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { e.ForEachPlan(func(plan *engine.Plan) bool { Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(e.parser.TruncateForUI(plan.Original)), } Value.Count, Value.tm, Value.ShardQueries, Value.RowsAffected, Value.RowsReturned, Value.Errors = plan.Stats() var timepq time.Duration diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index 2adb5b665a5..45fff46f629 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -73,6 +73,7 @@ type ( mu sync.Mutex entries []engine.ExecuteEntry lastID int + parser *sqlparser.Parser } // autocommitState keeps track of whether a single round-trip @@ -941,11 +942,13 @@ func (session *SafeSession) ClearAdvisoryLock() { session.AdvisoryLock = nil } -func (session *SafeSession) EnableLogging() { +func (session *SafeSession) EnableLogging(parser *sqlparser.Parser) { session.mu.Lock() defer session.mu.Unlock() - session.logging = &executeLogger{} + session.logging = &executeLogger{ + parser: parser, + } } // GetUDV returns the bind variable value for the user defined variable. @@ -998,7 +1001,7 @@ func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target, FiredFrom: primitive, }) } - ast, err := sqlparser.Parse(query) + ast, err := l.parser.Parse(query) if err != nil { panic("query not able to parse. 
this should not happen") } diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index a6d6cafc423..b5622c413c3 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -51,6 +51,8 @@ type ( // map of keyspace currently tracked tracked map[keyspaceStr]*updateController consumeDelay time.Duration + + parser *sqlparser.Parser } ) @@ -58,17 +60,18 @@ type ( const defaultConsumeDelay = 1 * time.Second // NewTracker creates the tracker object. -func NewTracker(ch chan *discovery.TabletHealth, enableViews bool) *Tracker { +func NewTracker(ch chan *discovery.TabletHealth, enableViews bool, parser *sqlparser.Parser) *Tracker { t := &Tracker{ ctx: context.Background(), ch: ch, tables: &tableMap{m: make(map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo)}, tracked: map[keyspaceStr]*updateController{}, consumeDelay: defaultConsumeDelay, + parser: parser, } if enableViews { - t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}} + t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}, parser: parser} } return t } @@ -290,7 +293,7 @@ func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { func (t *Tracker) updateTables(keyspace string, res map[string]string) { for tableName, tableDef := range res { - stmt, err := sqlparser.Parse(tableDef) + stmt, err := t.parser.Parse(tableDef) if err != nil { log.Warningf("error parsing table definition for %s: %v", tableName, err) continue @@ -483,7 +486,8 @@ func (t *Tracker) clearKeyspaceTables(ks string) { } type viewMap struct { - m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + parser *sqlparser.Parser } func (vm *viewMap) set(ks, tbl, sql string) { @@ -492,7 +496,7 @@ func (vm *viewMap) set(ks, tbl, sql string) { m = make(map[tableNameStr]sqlparser.SelectStatement) vm.m[ks] = m } - stmt, err := sqlparser.Parse(sql) + stmt, err := vm.parser.Parse(sql) if err != nil { log.Warningf("ignoring view '%s', parsing error in view definition: '%s'", tbl, sql) return diff --git a/go/vt/vtgate/schema/tracker_test.go b/go/vt/vtgate/schema/tracker_test.go index ce2a2d79b56..7b60278cbbf 100644 --- a/go/vt/vtgate/schema/tracker_test.go +++ b/go/vt/vtgate/schema/tracker_test.go @@ -81,7 +81,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { sbc := sandboxconn.NewSandboxConn(tablet) ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, false) + tracker := NewTracker(ch, false, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -396,7 +396,7 @@ type testCases struct { func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []testCases) { ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, true) + tracker := NewTracker(ch, true, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index d27d5a926c6..8f0cc7a9704 100644 --- a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -120,7 +120,7 @@ func TestBindingSingleTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "d", &FakeSI{}) require.NoError(t, err) 
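With the constructor change above, the schema tracker no longer reaches for a global parser; callers inject one. A minimal usage sketch, assuming only the NewTracker signature shown in this diff; the health channel and enableViews values are placeholders:

package example

import (
	"vitess.io/vitess/go/vt/discovery"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/vtgate/schema"
)

// buildTracker mirrors the test setup in this diff: tablet health updates
// arrive on the channel and the parser is passed in explicitly. Sketch only.
func buildTracker() *schema.Tracker {
	ch := make(chan *discovery.TabletHealth)
	return schema.NewTracker(ch, false /* enableViews */, sqlparser.NewTestParser())
}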
@@ -140,7 +140,7 @@ func TestBindingSingleAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -238,7 +238,7 @@ func TestBindingMultiTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -262,7 +262,7 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -285,7 +285,7 @@ func TestNotUniqueTableName(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) _, err := Analyze(parse, "test", &FakeSI{}) require.Error(t, err) require.Contains(t, err.Error(), "VT03013: not unique table/alias") @@ -300,7 +300,7 @@ func TestMissingTable(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) st, err := Analyze(parse, "", &FakeSI{}) require.NoError(t, err) require.ErrorContains(t, st.NotUnshardedErr, "column 't.col' not found") @@ -388,7 +388,7 @@ func TestUnknownColumnMap2(t *testing.T) { queries := []string{"select col from a, b", "select col from a as user, b as extra"} for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) expr := extract(parse.(*sqlparser.Select), 0) for _, test := range tests { @@ -419,7 +419,7 @@ func TestUnknownPredicate(t *testing.T) { Name: sqlparser.NewIdentifierCS("b"), } - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) tests := []struct { name string @@ -457,7 +457,7 @@ func TestScoping(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -537,7 +537,7 @@ func TestSubqueryOrderByBinding(t *testing.T) { for _, tc := range queries { t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) + ast, err := sqlparser.NewTestParser().Parse(tc.query) require.NoError(t, err) sel := ast.(*sqlparser.Select) @@ -842,7 +842,7 @@ func TestInvalidQueries(t *testing.T) { for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { - parse, err := sqlparser.Parse(tc.sql) + parse, err := sqlparser.NewTestParser().Parse(tc.sql) require.NoError(t, err) st, err := Analyze(parse, "dbName", fakeSchemaInfo()) @@ -961,7 +961,7 @@ func TestScopingWDerivedTables(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ 
Tables: map[string]*vindexes.Table{ @@ -1063,7 +1063,7 @@ func TestScopingWithWITH(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1114,7 +1114,7 @@ func TestJoinPredicateDependencies(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", fakeSchemaInfo()) @@ -1173,7 +1173,7 @@ func TestDerivedTablesOrderClause(t *testing.T) { si := &FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", si) @@ -1207,7 +1207,7 @@ func TestScopingWComplexDerivedTables(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1248,7 +1248,7 @@ func TestScopingWVindexTables(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) hash, _ := vindexes.CreateVindex("hash", "user_index", nil) st, err := Analyze(parse, "user", &FakeSI{ @@ -1290,7 +1290,7 @@ func BenchmarkAnalyzeMultipleDifferentQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1314,7 +1314,7 @@ func BenchmarkAnalyzeUnionQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1340,7 +1340,7 @@ func BenchmarkAnalyzeSubQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1370,7 +1370,7 @@ func BenchmarkAnalyzeDerivedTableQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1396,7 +1396,7 @@ func BenchmarkAnalyzeHavingQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1425,7 +1425,7 @@ func BenchmarkAnalyzeGroupByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := 
sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1448,7 +1448,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1458,7 +1458,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { func parseAndAnalyze(t *testing.T, query, dbName string) (sqlparser.Statement, *SemTable) { t.Helper() - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) semTable, err := Analyze(parse, dbName, fakeSchemaInfo()) @@ -1529,7 +1529,7 @@ func TestNextErrors(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = Analyze(parse, "d", fakeSchemaInfo()) @@ -1553,7 +1553,7 @@ func TestUpdateErrors(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) st, err := Analyze(parse, "d", fakeSchemaInfo()) @@ -1571,7 +1571,7 @@ func TestUpdateErrors(t *testing.T) { func TestScopingSubQueryJoinClause(t *testing.T) { query := "select (select 1 from u1 join u2 on u1.id = u2.id and u2.id = u3.id) x from u3" - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go index bf09d2d5cc3..09ddb223eef 100644 --- a/go/vt/vtgate/semantics/early_rewriter_test.go +++ b/go/vt/vtgate/semantics/early_rewriter_test.go @@ -187,7 +187,7 @@ func TestExpandStar(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -288,7 +288,7 @@ func TestRewriteJoinUsingColumns(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -346,7 +346,7 @@ func TestOrderByGroupByLiteral(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement := ast.(*sqlparser.Select) _, err = Analyze(selectStatement, cDB, schemaInfo) @@ -381,7 +381,7 @@ func TestHavingAndOrderByColumnName(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement := ast.(*sqlparser.Select) _, err = Analyze(selectStatement, cDB, schemaInfo) @@ -426,7 +426,7 @@ func TestSemTableDependenciesAfterExpandStar(t *testing.T) { 
}} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -486,7 +486,7 @@ func TestRewriteNot(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -538,7 +538,7 @@ func TestConstantFolding(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, schemaInfo) require.NoError(t, err) @@ -565,7 +565,7 @@ func TestCTEToDerivedTableRewrite(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, fakeSchemaInfo()) require.NoError(t, err) diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go index b150b5d4141..66315937174 100644 --- a/go/vt/vtgate/semantics/info_schema.go +++ b/go/vt/vtgate/semantics/info_schema.go @@ -30,11 +30,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func createCol(name string, typ int, collation string, def string, size, scale int32, notNullable bool, values string) vindexes.Column { +func createCol(parser *sqlparser.Parser, name string, typ int, collation string, def string, size, scale int32, notNullable bool, values string) vindexes.Column { var expr sqlparser.Expr if def != "" { var err error - expr, err = sqlparser.ParseExpr(def) + expr, err = parser.ParseExpr(def) if err != nil { panic(fmt.Sprintf("Failed to parse %q: %v", def, err)) } @@ -66,628 +66,632 @@ func createCol(name string, typ int, collation string, def string, size, scale i // getInfoSchema57 returns a map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema57() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "5.7.9"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 60, 0, true, "")) - cols = append(cols, createCol("MAXLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 60, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["CHARACTER_SETS"] = cols 
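The generated information_schema definitions above now build their own version-pinned parser instead of relying on package-level state. A minimal sketch of that construction, using only sqlparser.New and ParseExpr as they appear in this diff; the function name is hypothetical:

package example

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

// defaultExprFor57 parses a column default expression with a parser pinned to
// MySQL 5.7, mirroring what createCol does for the columns above. Sketch only.
func defaultExprFor57(def string) (sqlparser.Expr, error) {
	parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "5.7.9"})
	if err != nil {
		return nil, fmt.Errorf("create parser: %w", err)
	}
	return parser.ParseExpr(def)
}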
cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("ID", 265, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SORTLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 
0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_KEY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("PRIVILEGES", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("COLUMN_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "COLUMN_KEY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("INTERVAL_FIELD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) - cols = append(cols, createCol("ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = 
append(cols, createCol("LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORIGINATOR", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["EVENTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("FILE_NAME", 6165, "utf8mb3_general_ci", "", 4000, 0, false, "")) - cols = append(cols, createCol("FILE_TYPE", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 
64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("FULLTEXT_KEYS", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DELETED_ROWS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("UPDATE_COUNT", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATION_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_ACCESS_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("RECOVER_TIME", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("TRANSACTION_COUNTER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6165, "utf8mb3_general_ci", "", 4000, 0, false, "")) 
+ cols = append(cols, createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, 
"CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_STATUS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_VARIABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("BLOCK_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "BLOCK_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, 
createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LRU_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("POOL_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FREE_BUFFERS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_DECOMPRESS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_READS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, 
createCol("PAGES_NOT_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_CREATED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_READ_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_GET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("HIT_RATE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("LRU_IO_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LRU_IO_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LIST", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, 
"PAGES_MADE_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "HIT_RATE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP"] = 
cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
infSchema["INNODB_CMP_PER_INDEX"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
- cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
- cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
infSchema["INNODB_CMP_RESET"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
- cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
+ cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
infSchema["INNODB_CMPMEM"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
- cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
- cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, ""))
+ cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
+ cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, ""))
infSchema["INNODB_CMPMEM_RESET"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
infSchema["INNODB_FT_BEING_DELETED"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, ""))
- cols = append(cols, createCol("VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, ""))
+ cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, ""))
+ cols = append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, ""))
infSchema["INNODB_FT_CONFIG"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("value", 6165, "utf8mb3_general_ci", "", 18, 0, true, ""))
+ cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, ""))
infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
infSchema["INNODB_FT_DELETED"] = cols
cols = []vindexes.Column{}
- cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, ""))
- cols = append(cols, createCol("FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
- cols = append(cols, createCol("POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, ""))
+ cols = append(cols, createCol(parser, "FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, ""))
+ cols = append(cols, createCol(parser, 
"LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("COUNT_RESET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT_RESET", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = 
append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 202, 0, false, "")) - cols = append(cols, createCol("N_COLS", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("SPACE", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("PER_TABLE_TABLESPACE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_COMPRESSED", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 202, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "PER_TABLE_TABLESPACE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_COMPRESSED", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) - cols = append(cols, createCol("trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) - cols = append(cols, createCol("trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 81, 0, false, "")) - cols = append(cols, createCol("trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("trx_weight", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_mysql_thread_id", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("trx_tables_in_use", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_tables_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_lock_structs", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_lock_memory_bytes", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_rows_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_rows_modified", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_concurrency_tickets", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("trx_unique_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_foreign_key_checks", 263, "utf8mb3_general_ci", "0", 1, 0, 
true, "")) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_is_read_only", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_id", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 81, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_tables_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 
6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 265, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 265, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TRACE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "QUERY", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 
6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 263, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) - cols = append(cols, createCol("PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 263, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PARTITION_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("PARTITION_COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("NODEGROUP", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, 
createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["PARTITIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, 
"PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("TIME", 263, "utf8mb3_general_ci", "0", 7, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INFO", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "0", 7, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("SEQ", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("DURATION", 18, "utf8mb3_general_ci", "0.000000", 9, 6, true, "")) - cols = append(cols, createCol("CPU_USER", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) - cols = append(cols, createCol("CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SWAPS", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) - cols = append(cols, createCol("SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_LINE", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) 
+ cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "0.000000", 9, 6, true, "")) + cols = append(cols, createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("MATCH_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UPDATE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DELETE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + 
cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DELETE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SQL_DATA_ACCESS", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = 
append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = 
append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 6165, "utf8mb3_general_ci", "", 512, 0, false, "")) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 512, 0, false, "")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("NON_UNIQUE", 265, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SEQ_IN_INDEX", 265, "utf8mb3_general_ci", "0", 2, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("CARDINALITY", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("SUB_PART", 265, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = 
append(cols, createCol("PACKED", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 16, 0, false, "")) - cols = append(cols, createCol("INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 265, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 265, "utf8mb3_general_ci", "0", 2, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 16, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTO_INCREMENT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("TABLE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, 
createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NODEGROUP_ID", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_MANIPULATION", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ACTION_ORDER", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("ACTION_CONDITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("ACTION_TIMING", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 2, 0, false, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORDER", 265, "utf8mb3_general_ci", 
"0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 2, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CHECK_OPTION", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("IS_UPDATABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, 
"TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHECK_OPTION", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["VIEWS"] = cols return infSchema } @@ -695,897 +699,901 @@ func getInfoSchema57() map[string][]vindexes.Column { // getInfoSchema80 returns a map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema80() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "8.0.30"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ADMINISTRABLE_ROLE_AUTHORIZATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols 
= append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["APPLICABLE_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("MAXLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHARACTER_SETS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHECK_CLAUSE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHECK_CLAUSE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHECK_CONSTRAINTS"] = cols cols = 
[]vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ID", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SORTLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAD_ATTRIBUTE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'PAD SPACE','NO PAD'")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAD_ATTRIBUTE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'PAD SPACE','NO PAD'")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("HISTOGRAM", 2078, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "HISTOGRAM", 2078, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMN_STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_KEY", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'','PRI','UNI','MUL'")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("PRIVILEGES", 6165, "utf8mb3_general_ci", "", 154, 0, false, "")) - cols = append(cols, createCol("COLUMN_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, 
"utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_KEY", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'','PRI','UNI','MUL'")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 154, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, 
false, "")) - cols = append(cols, createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ENABLED_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - cols = append(cols, createCol("TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_BODY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("INTERVAL_FIELD", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND'")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, 
"'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'ENABLED','DISABLED','SLAVESIDE_DISABLED'")) - cols = append(cols, createCol("ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("ORIGINATOR", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND'")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, 
"'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'ENABLED','DISABLED','SLAVESIDE_DISABLED'")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["EVENTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FILE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FILE_TYPE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6167, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("FULLTEXT_KEYS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DELETED_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_COUNT", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, 
createCol("INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATION_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_ACCESS_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("RECOVER_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TRANSACTION_COUNTER", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6167, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, 
createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("BLOCK_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, 
createCol("OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_STALE", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "BLOCK_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_STALE", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LRU_POSITION", 778, "utf8mb3_general_ci", "", 0, 
0, true, "")) - cols = append(cols, createCol("SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, 
"IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POOL_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FREE_BUFFERS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_DECOMPRESS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_READS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_CREATED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_READ_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_GET", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("HIT_RATE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("LRU_IO_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LRU_IO_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, 
createCol("UNCOMPRESS_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LIST", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "HIT_RATE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, 
"UNCOMPRESS_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_CACHED_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_CACHED_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CACHED_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, 
"utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_free", 263, 
"utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("POS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PRTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LEN", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("HAS_DEFAULT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DEFAULT_VALUE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PRTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LEN", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "HAS_DEFAULT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_VALUE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) infSchema["INNODB_DATAFILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("POS", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FIELDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("FOR_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("REF_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("N_COLS", 265, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("TYPE", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "REF_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 265, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FOREIGN"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("FOR_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REF_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FOREIGN_COLS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_BEING_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) 
- cols = append(cols, createCol("VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_FT_CONFIG"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_CACHE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, 
createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_FIELDS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NO", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MERGE_THRESHOLD", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_FIELDS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NO", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MERGE_THRESHOLD", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COUNT", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT_RESET", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, 
createCol(parser, "AVG_COUNT", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 4001, 0, true, "")) - cols = append(cols, createCol("SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("PURPOSE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 4001, 0, true, "")) + cols = append(cols, createCol(parser, "SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "PURPOSE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) infSchema["INNODB_SESSION_TEMP_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) - cols = append(cols, createCol("FLAG", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("INSTANT_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TOTAL_ROW_VERSIONS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = 
append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "INSTANT_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TOTAL_ROW_VERSIONS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) - cols = append(cols, createCol("FLAG", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 22, 0, false, "")) - cols = append(cols, createCol("PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("FS_BLOCK_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FILE_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ALLOCATED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SERVER_VERSION", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("SPACE_VERSION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ENCRYPTION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 22, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "FS_BLOCK_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ALLOCATED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "SERVER_VERSION", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_VERSION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ENCRYPTION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) infSchema["INNODB_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("FLAG", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) infSchema["INNODB_TABLESPACES_BRIEF"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("STATS_INITIALIZED", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("NUM_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CLUST_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OTHER_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MODIFIED_COUNTER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("AUTOINC", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("REF_COUNT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "STATS_INITIALIZED", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NUM_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CLUST_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OTHER_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_COUNTER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOINC", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COUNT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLESTATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols 
= append(cols, createCol("N_COLS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) - cols = append(cols, createCol("trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 105, 0, false, "")) - cols = append(cols, createCol("trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("trx_weight", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_mysql_thread_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("trx_tables_in_use", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_tables_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_lock_structs", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_lock_memory_bytes", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_rows_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_rows_modified", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_concurrency_tickets", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("trx_unique_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_foreign_key_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_is_read_only", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_schedule_weight", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, 
"trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 105, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_tables_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_schedule_weight", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("BASE_POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "BASE_POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_VIRTUAL"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, 
createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 776, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 128, 0, false, "")) - cols = append(cols, createCol("RESERVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 128, 0, false, "")) + cols = append(cols, createCol(parser, "RESERVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["KEYWORDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) - cols = append(cols, createCol("TRACE", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) + cols = append(cols, createCol(parser, "QUERY", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols 
cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) - cols = append(cols, createCol("PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 
6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NODEGROUP", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6165, 
"utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, false, "")) infSchema["PARTITIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, 
"utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 261, 0, true, "")) - cols = append(cols, createCol("DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("TIME", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INFO", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 261, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SEQ", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("DURATION", 18, "utf8mb3_general_ci", "", 905, 0, true, "")) - cols = append(cols, createCol("CPU_USER", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) - cols = append(cols, createCol("CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SWAPS", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) 
- cols = append(cols, createCol("SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) - cols = append(cols, createCol("SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_LINE", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "", 905, 0, true, "")) + cols = append(cols, createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("MATCH_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NONE','PARTIAL','FULL'")) - cols = append(cols, createCol("UPDATE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) - cols = append(cols, createCol("DELETE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, 
"CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NONE','PARTIAL','FULL'")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "DELETE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("RESOURCE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("RESOURCE_GROUP_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'SYSTEM','USER'")) - cols = append(cols, createCol("RESOURCE_GROUP_ENABLED", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) - cols = append(cols, createCol("VCPU_IDS", 10260, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("THREAD_PRIORITY", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'SYSTEM','USER'")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_ENABLED", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) + cols = append(cols, createCol(parser, "VCPU_IDS", 10260, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "THREAD_PRIORITY", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["RESOURCE_GROUPS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','References'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols 
= append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','References'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_COLUMN_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Execute','Alter Routine','Grant'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Execute','Alter 
Routine','Grant'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_ROUTINE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_TABLE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) - cols = append(cols, createCol("DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, 
false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "SQL", 64, 0, true, "")) - cols = append(cols, createCol("PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SQL_DATA_ACCESS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'CONTAINS SQL','NO SQL','READS SQL DATA','MODIFIES SQL DATA'")) - cols = append(cols, createCol("SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'DEFAULT','INVOKER','DEFINER'")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, 
"utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "SQL", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'CONTAINS SQL','NO SQL','READS SQL DATA','MODIFIES SQL DATA'")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'DEFAULT','INVOKER','DEFINER'")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = 
append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DEFAULT_ENCRYPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO','YES'")) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_ENCRYPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO','YES'")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["SCHEMATA_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("GEOMETRY_TYPE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, 
"TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "GEOMETRY_TYPE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["ST_GEOMETRY_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ORGANIZATION", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ORGANIZATION_COORDSYS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DEFINITION", 6165, "utf8mb3_general_ci", "", 4096, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ORGANIZATION", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ORGANIZATION_COORDSYS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFINITION", 6165, "utf8mb3_general_ci", "", 4096, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["ST_SPATIAL_REFERENCE_SYSTEMS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("UNIT_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("UNIT_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) - cols = append(cols, createCol("CONVERSION_FACTOR", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "UNIT_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "UNIT_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CONVERSION_FACTOR", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) infSchema["ST_UNITS_OF_MEASURE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("NON_UNIQUE", 263, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SEQ_IN_INDEX", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = 
append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("CARDINALITY", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUB_PART", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PACKED", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("IS_VISIBLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 263, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "IS_VISIBLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165, 
"utf8mb3_general_ci", "", 11, 0, true, "")) - cols = append(cols, createCol("ENFORCED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "ENFORCED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLE_CONSTRAINTS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) 
infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BASE TABLE','VIEW','SYSTEM VIEW'")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VERSION", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'Fixed','Dynamic','Compressed','Redundant','Compact','Paged'")) - cols = append(cols, createCol("TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTO_INCREMENT", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BASE TABLE','VIEW','SYSTEM VIEW'")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'Fixed','Dynamic','Compressed','Redundant','Compact','Paged'")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = 
append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NODEGROUP_ID", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols 
= append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLESPACES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_MANIPULATION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'INSERT','UPDATE','DELETE'")) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_ORDER", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_CONDITION", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_TIMING", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BEFORE','AFTER'")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 2, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - 
cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'INSERT','UPDATE','DELETE'")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_ORDER", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BEFORE','AFTER'")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 2, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, 
createCol("HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("ATTRIBUTE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "USER", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "ATTRIBUTE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["USER_ATTRIBUTES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEW_ROUTINE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VIEW_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, 
createCol(parser, "VIEW_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["VIEW_TABLE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NONE','LOCAL','CASCADED'")) - cols = append(cols, createCol("IS_UPDATABLE", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NO','YES'")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NONE','LOCAL','CASCADED'")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NO','YES'")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEWS"] = cols return infSchema } diff --git a/go/vt/vtgate/semantics/semantic_state_test.go b/go/vt/vtgate/semantics/semantic_state_test.go index b904f3656de..4ae0a5562b5 100644 --- a/go/vt/vtgate/semantics/semantic_state_test.go +++ b/go/vt/vtgate/semantics/semantic_state_test.go @@ -46,7 +46,7 @@ func TestBindingAndExprEquality(t *testing.T) { for _, test := range tests { t.Run(test.expressions, func(t *testing.T) { - parse, err := sqlparser.Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) + parse, err := sqlparser.NewTestParser().Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) require.NoError(t, err) st, err := Analyze(parse, "db", fakeSchemaInfoTest()) require.NoError(t, err) @@ -853,7 +853,7 @@ func TestIsFkDependentColumnUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - stmt, err := sqlparser.Parse(tt.query) + stmt, err := 
sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err) semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) require.NoError(t, err) @@ -970,7 +970,7 @@ func TestHasNonLiteralForeignKeyUpdate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - stmt, err := sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err) semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) require.NoError(t, err) diff --git a/go/vt/vtgate/semantics/typer_test.go b/go/vt/vtgate/semantics/typer_test.go index c5417edbf64..c87d5672dab 100644 --- a/go/vt/vtgate/semantics/typer_test.go +++ b/go/vt/vtgate/semantics/typer_test.go @@ -40,7 +40,7 @@ func TestNormalizerAndSemanticAnalysisIntegration(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) err = sqlparser.Normalize(parse, sqlparser.NewReservedVars("bv", sqlparser.BindVars{}), map[string]*querypb.BindVariable{}) diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index 21c238e1a47..c8b052d7a9c 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -49,7 +49,7 @@ order by unsharded.orderByExpr2 asc limit 123 offset 456 ` - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Printf(">> found expression: %s\n", sqlparser.String(cursor.expr)) @@ -67,7 +67,7 @@ limit 123 offset 456 func TestAbortExpressionCursor(t *testing.T) { query := "select user.id, count(*), unsharded.name from user join unsharded on 13 = 14 where unsharded.id = 42 and name = 'foo' and user.id = unsharded.id" - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Println(sqlparser.String(cursor.expr)) diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index aa6c0f347fe..17ed098c256 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -83,6 +83,9 @@ type iExecute interface { ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) VSchema() *vindexes.VSchema planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) + + collationEnv() *collations.Environment + sqlparser() *sqlparser.Parser } // VSchemaOperator is an interface to Vschema Operations @@ -104,7 +107,6 @@ type vcursorImpl struct { topoServer *topo.Server logStats *logstats.LogStats collation collations.ID - collationEnv *collations.Environment // fkChecksState stores the state of foreign key checks variable. 
// This state is meant to be the final fk checks state after consulting the @@ -139,7 +141,6 @@ func newVCursorImpl( serv srvtopo.Server, warnShardedOnly bool, pv plancontext.PlannerVersion, - collationEnv *collations.Environment, ) (*vcursorImpl, error) { keyspace, tabletType, destination, err := parseDestinationTarget(safeSession.TargetString, vschema) if err != nil { @@ -164,7 +165,7 @@ func newVCursorImpl( } } if connCollation == collations.Unknown { - connCollation = collationEnv.DefaultConnectionCharset() + connCollation = executor.collEnv.DefaultConnectionCharset() } warmingReadsPct := 0 @@ -190,7 +191,6 @@ func newVCursorImpl( pv: pv, warmingReadsPercent: warmingReadsPct, warmingReadsChannel: warmingReadsChan, - collationEnv: collationEnv, }, nil } @@ -211,7 +211,11 @@ func (vc *vcursorImpl) ConnCollation() collations.ID { // ConnCollation returns the collation of this session func (vc *vcursorImpl) CollationEnv() *collations.Environment { - return vc.collationEnv + return vc.executor.collationEnv() +} + +func (vc *vcursorImpl) SQLParser() *sqlparser.Parser { + return vc.executor.sqlparser() } func (vc *vcursorImpl) TimeZone() *time.Location { @@ -1090,7 +1094,7 @@ func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.Stri _, _ = buf.WriteString(vc.keyspace) _, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType]) _, _ = buf.WriteString("+Collate:") - _, _ = buf.WriteString(vc.collationEnv.LookupName(vc.collation)) + _, _ = buf.WriteString(vc.CollationEnv().LookupName(vc.collation)) if vc.destination != nil { switch vc.destination.(type) { @@ -1248,7 +1252,7 @@ func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda } func (vc *vcursorImpl) CanUseSetVar() bool { - return sqlparser.IsMySQL80AndAbove() && setVarEnabled + return vc.SQLParser().IsMySQL80AndAbove() && setVarEnabled } func (vc *vcursorImpl) ReleaseLock(ctx context.Context) error { @@ -1277,7 +1281,7 @@ func (vc *vcursorImpl) cloneWithAutocommitSession() *vcursorImpl { } func (vc *vcursorImpl) VExplainLogging() { - vc.safeSession.EnableLogging() + vc.safeSession.EnableLogging(vc.SQLParser()) } func (vc *vcursorImpl) GetVExplainLogs() []engine.ExecuteEntry { diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index 37358909641..b8e4a0d3a0a 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" @@ -185,9 +184,10 @@ func TestDestinationKeyspace(t *testing.T) { expectedError: errNoKeyspace.Error(), }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(strconv.Itoa(i)+tc.targetString, func(t *testing.T) { - impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4, collations.MySQL8()) + impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) impl.vschema = tc.vschema dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier) if tc.expectedError == "" { @@ -243,9 +243,10 @@ func TestSetTarget(t *testing.T) { expectedError: "can't execute 
the given command because you have an active transaction", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { - vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4, collations.MySQL8()) + vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) vc.vschema = tc.vschema err := vc.SetTarget(tc.targetString) if tc.expectedError == "" { @@ -291,11 +292,12 @@ func TestKeyForPlan(t *testing.T) { expectedPlanPrefixKey: "ks1@replica+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { ss := NewSafeSession(&vtgatepb.Session{InTransaction: false}) ss.SetTargetString(tc.targetString) - vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4, collations.MySQL8()) + vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) vc.vschema = tc.vschema @@ -317,7 +319,8 @@ func TestFirstSortedKeyspace(t *testing.T) { ks3Schema.Keyspace.Name: ks3Schema, }} - vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4, collations.MySQL8()) + r, _, _, _, _ := createExecutorEnv(t) + vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) ks, err := vc.FirstSortedKeyspace() require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index 96c94be87d1..ba703f31c22 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -309,7 +309,7 @@ func (source *Source) String() string { } // BuildVSchema builds a VSchema from a SrvVSchema. -func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { +func BuildVSchema(source *vschemapb.SrvVSchema, parser *sqlparser.Parser) (vschema *VSchema) { vschema = &VSchema{ RoutingRules: make(map[string]*RoutingRule), globalTables: make(map[string]*Table), @@ -317,22 +317,22 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { Keyspaces: make(map[string]*KeyspaceSchema), created: time.Now(), } - buildKeyspaces(source, vschema) + buildKeyspaces(source, vschema, parser) // buildGlobalTables before buildReferences so that buildReferences can // resolve sources which reference global tables. 
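[Editor's note, not part of the patch] The hunk above changes BuildVSchema to take an explicit *sqlparser.Parser, so every caller now constructs one and threads it through. A minimal sketch of the new call shape, assuming a servenv-configured binary; the helper name buildWithInjectedParser is invented for illustration:

package example

import (
	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
	"vitess.io/vitess/go/vt/servenv"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/vtgate/vindexes"
)

// buildWithInjectedParser builds one Parser from the servenv options and passes
// it down explicitly, instead of relying on the removed package-level functions.
func buildWithInjectedParser(src *vschemapb.SrvVSchema) (*vindexes.VSchema, error) {
	parser, err := sqlparser.New(sqlparser.Options{
		MySQLServerVersion: servenv.MySQLServerVersion(),
		TruncateUILen:      servenv.TruncateUILen,
		TruncateErrLen:     servenv.TruncateErrLen,
	})
	if err != nil {
		return nil, err
	}
	return vindexes.BuildVSchema(src, parser), nil
}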
buildGlobalTables(source, vschema) buildReferences(source, vschema) - buildRoutingRule(source, vschema) + buildRoutingRule(source, vschema, parser) buildShardRoutingRule(source, vschema) // Resolve auto-increments after routing rules are built since sequence tables also obey routing rules. - resolveAutoIncrement(source, vschema) + resolveAutoIncrement(source, vschema, parser) return vschema } // BuildKeyspaceSchema builds the vschema portion for one keyspace. // The build ignores sequence references because those dependencies can // go cross-keyspace. -func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceSchema, error) { +func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string, parser *sqlparser.Parser) (*KeyspaceSchema, error) { if input == nil { input = &vschemapb.Keyspace{} } @@ -346,18 +346,18 @@ func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceS uniqueVindexes: make(map[string]Vindex), Keyspaces: make(map[string]*KeyspaceSchema), } - buildKeyspaces(formal, vschema) + buildKeyspaces(formal, vschema, parser) err := vschema.Keyspaces[keyspace].Error return vschema.Keyspaces[keyspace], err } // BuildKeyspace ensures that the keyspace vschema is valid. // External references (like sequence) are not validated. -func BuildKeyspace(input *vschemapb.Keyspace) (*KeyspaceSchema, error) { - return BuildKeyspaceSchema(input, "") +func BuildKeyspace(input *vschemapb.Keyspace, parser *sqlparser.Parser) (*KeyspaceSchema, error) { + return BuildKeyspaceSchema(input, "", parser) } -func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := &KeyspaceSchema{ Keyspace: &Keyspace{ @@ -369,7 +369,7 @@ func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { Vindexes: make(map[string]Vindex), } vschema.Keyspaces[ksname] = ksvschema - ksvschema.Error = buildTables(ks, vschema, ksvschema) + ksvschema.Error = buildTables(ks, vschema, ksvschema, parser) } } @@ -381,12 +381,12 @@ func replaceUnspecifiedForeignKeyMode(fkMode vschemapb.Keyspace_ForeignKeyMode) return fkMode } -func (vschema *VSchema) AddView(ksname string, viewName, query string) error { +func (vschema *VSchema) AddView(ksname, viewName, query string, parser *sqlparser.Parser) error { ks, ok := vschema.Keyspaces[ksname] if !ok { return fmt.Errorf("keyspace %s not found in vschema", ksname) } - ast, err := sqlparser.Parse(query) + ast, err := parser.Parse(query) if err != nil { return err } @@ -555,7 +555,7 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error return nil } -func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema) error { +func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema, parser *sqlparser.Parser) error { keyspace := ksvschema.Keyspace for vname, vindexInfo := range ks.Vindexes { vindex, err := CreateVindex(vindexInfo.Type, vname, vindexInfo.Params) @@ -650,7 +650,7 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc var colDefault sqlparser.Expr if col.Default != "" { var err error - colDefault, err = sqlparser.ParseExpr(col.Default) + colDefault, err = parser.ParseExpr(col.Default) if err != nil { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse the '%s' column's default expression '%s' for table '%s'", col.Name, col.Default, tname) @@ -809,7 
+809,7 @@ func (vschema *VSchema) addTableName(t *Table) { } } -func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { +func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := vschema.Keyspaces[ksname] for tname, table := range ks.Tables { @@ -817,7 +817,7 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { if t == nil || table.AutoIncrement == nil { continue } - seqks, seqtab, err := sqlparser.ParseTable(table.AutoIncrement.Sequence) + seqks, seqtab, err := parser.ParseTable(table.AutoIncrement.Sequence) var seq *Table if err == nil { // Ensure that sequence tables also obey routing rules. @@ -893,7 +893,7 @@ func parseTable(tableName string) (sqlparser.TableName, error) { }, nil } -func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { var err error if source.RoutingRules == nil { return @@ -936,7 +936,7 @@ outer: continue outer } - toKeyspace, toTableName, err := sqlparser.ParseTable(toTable) + toKeyspace, toTableName, err := parser.ParseTable(toTable) if err != nil { vschema.RoutingRules[rule.FromTable] = &RoutingRule{ diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index ebcb39fef29..ad892a66ccb 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -235,7 +235,7 @@ func init() { } func buildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { - vs := BuildVSchema(source) + vs := BuildVSchema(source, sqlparser.NewTestParser()) if vs != nil { vs.ResetCreated() } @@ -247,7 +247,7 @@ func TestUnshardedVSchemaValid(t *testing.T) { Sharded: false, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) } @@ -282,7 +282,7 @@ func TestForeignKeyMode(t *testing.T) { ForeignKeyMode: test.fkMode, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, test.wantedFkMode, ksSchema.ForeignKeyMode) }) @@ -297,7 +297,7 @@ func TestUnshardedVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": {}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) table, err := got.FindTable("unsharded", "t1") @@ -322,7 +322,7 @@ func TestVSchemaColumns(t *testing.T) { {Name: "c4", Type: sqltypes.TypeJSON, Default: "json_array()"}, }}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) t1, err := got.FindTable("unsharded", "t1") @@ -352,11 +352,11 @@ func TestVSchemaViews(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) // add view to unsharded keyspace. 
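[Editor's note, not part of the patch] The test updates above all follow one mechanical pattern: the removed package-level sqlparser.Parse/ParseExpr calls become methods on a parser obtained from sqlparser.NewTestParser(), which carries default options. A self-contained sketch (the test name is made up; the parsed inputs mirror ones used in the diff):

package vindexes_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"vitess.io/vitess/go/vt/sqlparser"
)

func TestParserInjectionPattern(t *testing.T) {
	parser := sqlparser.NewTestParser()

	// Statements are parsed through the injected parser rather than a global.
	stmt, err := parser.Parse("SELECT c1+c2 AS added FROM t1")
	require.NoError(t, err)
	require.Equal(t, "select c1 + c2 as added from t1", sqlparser.String(stmt))

	// Column default expressions go through ParseExpr on the same parser.
	expr, err := parser.ParseExpr("json_array()")
	require.NoError(t, err)
	require.NotNil(t, expr)
}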
- vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1") + vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1", sqlparser.NewTestParser()) view := vschema.FindView("unsharded", "v1") assert.Equal(t, "select c1 + c2 as added from t1", sqlparser.String(view)) @@ -411,7 +411,7 @@ func TestVSchemaForeignKeys(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["main"].Error) // add fk constraints to a keyspace. @@ -474,7 +474,7 @@ func TestVSchemaColumnListAuthoritative(t *testing.T) { Type: sqltypes.VarChar}}, ColumnListAuthoritative: true}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) t1, err := got.FindTable("unsharded", "t1") require.NoError(t, err) @@ -493,7 +493,7 @@ func TestVSchemaColumnsFail(t *testing.T) { Name: "c1"}, { Name: "c1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.EqualError(t, got.Keyspaces["unsharded"].Error, "duplicate column name 'c1' for table: t1") } @@ -506,7 +506,7 @@ func TestVSchemaPinned(t *testing.T) { "t1": { Pinned: "80"}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -538,7 +538,7 @@ func TestShardedVSchemaOwned(t *testing.T) { Column: "c2", Name: "stln1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -608,7 +608,7 @@ func TestShardedVSchemaOwnerInfo(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) results := []struct { @@ -710,7 +710,7 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, } - got := BuildVSchema(&input) + got := BuildVSchema(&input, sqlparser.NewTestParser()) ks1 := &Keyspace{ Name: "ks1", Sharded: true, @@ -958,7 +958,7 @@ func TestFindBestColVindex(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t2": {}}}}} - vs := BuildVSchema(testSrvVSchema) + vs := BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { tablename string @@ -1274,7 +1274,7 @@ func TestBuildVSchemaVindexNotFoundFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `vindexType "noexist" not found` if err == nil || err.Error() != want { @@ -1298,7 +1298,7 @@ func TestBuildVSchemaNoColumnVindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "missing primary col vindex for table: t1" if err == nil || err.Error() != want { @@ -1583,7 +1583,7 @@ func TestBuildVSchemaNoindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "vindex notexist not found for table t1" if err == nil || err.Error() != want { @@ -1615,7 +1615,7 @@ func TestBuildVSchemaColumnAndColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `can't use column and columns at the same time in vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ 
-1645,7 +1645,7 @@ func TestBuildVSchemaNoColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `must specify at least one column for vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ -1676,7 +1676,7 @@ func TestBuildVSchemaNotUniqueFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stln is not Unique for table t1" if err == nil || err.Error() != want { @@ -1708,7 +1708,7 @@ func TestBuildVSchemaPrimaryCannotBeOwned(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stlu cannot be owned for table t1" if err == nil || err.Error() != want { @@ -1736,7 +1736,7 @@ func TestBuildVSchemaReferenceTableSourceMayBeUnqualified(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) require.NoError(t, vschema.Keyspaces["sharded"].Error) } @@ -1768,7 +1768,7 @@ func TestBuildVSchemaReferenceTableSourceMustBeInDifferentKeyspace(t *testing.T) }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"sharded.src\" may not reference a table in the same keyspace as table: ref") @@ -1788,7 +1788,7 @@ func TestBuildVSchemaReferenceTableSourceKeyspaceMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a non-existent keyspace \"unsharded\"") @@ -1814,7 +1814,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a table \"src\" that is not present in the VSchema of keyspace \"unsharded\"") @@ -1852,7 +1852,7 @@ func TestBuildVSchemaReferenceTableSourceMayUseShardedKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["sharded1"].Error) require.NoError(t, vschema.Keyspaces["sharded2"].Error) } @@ -1919,7 +1919,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReferenceWithoutSourc }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded1"].Error) require.EqualError(t, vschema.Keyspaces["sharded1"].Error, "source \"unsharded1.src1\" may not reference a table of type \"sequence\": ref1") @@ -1953,7 +1953,7 @@ func TestBuildVSchemaSourceMayBeReferencedAtMostOncePerKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" may not be referenced more than once per 
keyspace: ref1, ref2") @@ -1991,7 +1991,7 @@ func TestBuildVSchemaMayNotChainReferences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["unsharded1"].Error) require.EqualError(t, vschema.Keyspaces["unsharded1"].Error, "reference chaining is not allowed ref => unsharded2.ref => unsharded3.ref: ref") @@ -2193,7 +2193,7 @@ func TestBadSequence(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "cannot resolve sequence invalid_seq: table invalid_seq not found" if err == nil || err.Error() != want { @@ -2241,7 +2241,7 @@ func TestBadSequenceName(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "invalid table name: a.b.seq" if err == nil || !strings.Contains(err.Error(), want) { @@ -2265,7 +2265,7 @@ func TestBadShardedSequence(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "sequence table has to be in an unsharded keyspace or must be pinned: t1" if err == nil || err.Error() != want { @@ -2316,7 +2316,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vschema.FindTable("", "t1") require.EqualError(t, err, "ambiguous table reference: t1") @@ -2440,7 +2440,7 @@ func TestFindTableOrVindex(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) ta := vschema.Keyspaces["ksa"].Tables["ta"] t1 := vschema.Keyspaces["ksb"].Tables["t1"] @@ -2543,7 +2543,7 @@ func TestBuildKeyspaceSchema(t *testing.T) { "t2": {}, }, } - got, _ := BuildKeyspaceSchema(good, "ks") + got, _ := BuildKeyspaceSchema(good, "ks", sqlparser.NewTestParser()) err := got.Error require.NoError(t, err) ks := &Keyspace{ @@ -2585,7 +2585,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err := BuildKeyspace(good) + _, err := BuildKeyspace(good, sqlparser.NewTestParser()) require.NoError(t, err) bad := &vschemapb.Keyspace{ Sharded: true, @@ -2598,7 +2598,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err = BuildKeyspace(bad) + _, err = BuildKeyspace(bad, sqlparser.NewTestParser()) want := `vindexType "absent" not found` if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Validate: %v, must start with %s", err, want) @@ -2794,7 +2794,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) none := &Table{ Name: sqlparser.NewIdentifierCS("none"), Keyspace: &Keyspace{ @@ -2835,7 +2835,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema = BuildVSchema(&input) + vschema = BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vschema.FindTable("", "none") wantErr := "table none not found" if err == nil || err.Error() != wantErr { @@ -2869,7 +2869,7 @@ func TestMultiColVindexPartialAllowed(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "user_region") require.NoError(t, err) require.Len(t, table.ColumnVindexes, 2) @@ -2902,7 +2902,7 @@ func TestMultiColVindexPartialNotAllowed(t *testing.T) { }, }, } - vschema := 
BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "multiColTbl") require.NoError(t, err) require.Len(t, table.ColumnVindexes, 1) @@ -2939,7 +2939,7 @@ func TestSourceTableHasReferencedBy(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) ref1, err := vs.FindTable("sharded1", "ref") require.NoError(t, err) ref2, err := vs.FindTable("sharded2", "ref") @@ -2973,7 +2973,7 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) t1, err := vs.FindTable("unsharded", "t1") require.NoError(t, err) // If the source of a reference table does not require explicit routing, @@ -2983,7 +2983,7 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { require.Equal(t, t1, globalT1) input.Keyspaces["unsharded"].RequireExplicitRouting = true - vs = BuildVSchema(&input) + vs = BuildVSchema(&input, sqlparser.NewTestParser()) _, err = vs.FindTable("sharded", "t1") require.NoError(t, err) // If the source of a reference table requires explicit routing, then @@ -3019,7 +3019,7 @@ func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vs.FindTable("", "t1") require.Error(t, err) } @@ -3120,7 +3120,7 @@ func TestFindTableWithSequences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) notFoundError := func(table string) string { return fmt.Sprintf("table %s not found", table) diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index 20c11634b54..e202186894a 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -44,6 +44,7 @@ type VSchemaManager struct { cell string subscriber func(vschema *vindexes.VSchema, stats *VSchemaStats) schema SchemaInfo + parser *sqlparser.Parser } // SchemaInfo is an interface to schema tracker. @@ -71,7 +72,7 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ksName string, vsch ks := vschema.Keyspaces[ksName] - _, err = vindexes.BuildKeyspace(ks) + _, err = vindexes.BuildKeyspace(ks, vm.parser) if err != nil { return err } @@ -132,7 +133,7 @@ func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) bool if v == nil { // We encountered an error, build an empty vschema. 
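[Editor's note, not part of the patch] The VSchemaManager hunks above add a parser field precisely because even the error fallback needs one now. A condensed sketch of that dependency; the method name rebuildEmpty is invented, the real code path is the VSchemaUpdate branch shown above:

package vtgate

import (
	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
	"vitess.io/vitess/go/vt/vtgate/vindexes"
)

// rebuildEmpty shows why the manager owns a *sqlparser.Parser: building even an
// empty VSchema requires passing the parser through to vindexes.BuildVSchema.
func (vm *VSchemaManager) rebuildEmpty() *vindexes.VSchema {
	return vindexes.BuildVSchema(&vschemapb.SrvVSchema{}, vm.parser)
}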
if vm.currentVschema == nil { - vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}) + vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}, vm.parser) } } else { vschema = vm.buildAndEnhanceVSchema(v) @@ -187,7 +188,7 @@ func (vm *VSchemaManager) Rebuild() { // buildAndEnhanceVSchema builds a new VSchema and uses information from the schema tracker to update it func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vindexes.VSchema { - vschema := vindexes.BuildVSchema(v) + vschema := vindexes.BuildVSchema(v, vm.parser) if vm.schema != nil { vm.updateFromSchema(vschema) // We mark the keyspaces that have foreign key management in Vitess and have cyclic foreign keys diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go index 4d414c9d58a..53cbc323720 100644 --- a/go/vt/vtgate/vschema_manager_test.go +++ b/go/vt/vtgate/vschema_manager_test.go @@ -575,7 +575,7 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 435f53e33e0..4716cdcf794 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -299,10 +299,19 @@ func Init( log.Fatal("Failed to create a new sidecar database identifier cache during init as one already existed!") } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("unable to initialize sql parser: %v", err) + } + var si SchemaInfo // default nil var st *vtschema.Tracker if enableSchemaChangeSignal { - st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews) + st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews, parser) addKeyspacesToTracker(ctx, srvResolver, st, gw) si = st } @@ -323,6 +332,7 @@ func Init( pv, warmingReadsPercent, collationEnv, + parser, ) if err := executor.defaultQueryLogger(); err != nil { @@ -464,7 +474,7 @@ func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute, vtg.executor.vm.parser) return session, nil, err } @@ -530,7 +540,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MyS "BindVariables": bindVariables, "Session": session, } - return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) + return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute, vtg.executor.vm.parser) } return safeSession.Session, nil } @@ -570,7 +580,7 @@ handleError: "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare) + err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare, vtg.executor.vm.parser) return session, 
nil, err } @@ -589,7 +599,7 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats { return vtg.executor.VSchemaStats() } -func truncateErrorStrings(data map[string]any) map[string]any { +func truncateErrorStrings(data map[string]any, parser *sqlparser.Parser) map[string]any { ret := map[string]any{} if terseErrors { // request might have PII information. Return an empty map @@ -598,16 +608,16 @@ func truncateErrorStrings(data map[string]any) map[string]any { for key, val := range data { mapVal, ok := val.(map[string]any) if ok { - ret[key] = truncateErrorStrings(mapVal) + ret[key] = truncateErrorStrings(mapVal, parser) } else { strVal := fmt.Sprintf("%v", val) - ret[key] = sqlparser.TruncateForLog(strVal) + ret[key] = parser.TruncateForLog(strVal) } } return ret } -func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger) error { +func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger, parser *sqlparser.Parser) error { ec := vterrors.Code(err) fullKey := []string{ statsKey[0], @@ -623,7 +633,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any } // Traverse the request structure and truncate any long values - request = truncateErrorStrings(request) + request = truncateErrorStrings(request, parser) errorCounts.Add(fullKey, 1) @@ -638,7 +648,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any if !exists { return err } - piiSafeSQL, err2 := sqlparser.RedactSQLQuery(sql.(string)) + piiSafeSQL, err2 := parser.RedactSQLQuery(sql.(string)) if err2 != nil { return err } diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index e671cb447c7..eb70eaeb9cb 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -57,6 +57,19 @@ func NewClient() *QueryClient { } } +// NewClientWithServer creates a new client for a given server. +func NewClientWithServer(server *tabletserver.TabletServer) *QueryClient { + return &QueryClient{ + ctx: callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"}, + ), + target: Target, + server: server, + } +} + // NewClientWithTabletType creates a new client for Server with the provided tablet type. 
func NewClientWithTabletType(tabletType topodatapb.TabletType) *QueryClient { targetCopy := Target.CloneVT() diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index cca4bc90407..e966b934cb8 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/yaml2" @@ -78,7 +79,7 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql } TopoServer = memorytopo.NewServer(ctx, "") - Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}, collations.MySQL8()) + Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) Server.Register() err := Server.StartService(Target, dbcfgs, nil /* mysqld */) if err != nil { diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 5c37a5d9bb0..a4e7dea89e3 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -28,20 +28,17 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) @@ -628,66 +625,6 @@ func (tl *testLogger) getLog(i int) string { return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(tl.logs)) } -func TestLogTruncation(t *testing.T) { - client := framework.NewClient() - tl := newTestLogger() - defer tl.Close() - - // Test that a long error string is not truncated by default - _, err := client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog := `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - wantErr := wantLog - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(0) != wantLog { - t.Errorf("log was unexpectedly truncated: got\n'%s', want\n'%s'", tl.getLog(0), wantLog) - } - - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that the data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED]", BindVars: {data: " 
[TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(1) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(1), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that trailing comments are preserved data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED] /* KEEP ME */", BindVars: {data: " [TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(2) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(2), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } -} - func TestClientFoundRows(t *testing.T) { client := framework.NewClient() if _, err := client.Execute("insert into vitess_test(intval, charval) values(124, 'aa')", nil); err != nil { diff --git a/go/vt/vttablet/onlineddl/analysis.go b/go/vt/vttablet/onlineddl/analysis.go index 68eee5d4b9b..dbd8a5dab45 100644 --- a/go/vt/vttablet/onlineddl/analysis.go +++ b/go/vt/vttablet/onlineddl/analysis.go @@ -75,7 +75,7 @@ func (e *Executor) getCreateTableStatement(ctx context.Context, tableName string if err != nil { return nil, vterrors.Wrapf(err, "in Executor.getCreateTableStatement()") } - stmt, err := sqlparser.ParseStrictDDL(showCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(showCreateTable) if err != nil { return nil, err } @@ -349,7 +349,7 @@ func AnalyzeInstantDDL(alterTable *sqlparser.AlterTable, createTable *sqlparser. // analyzeSpecialAlterPlan checks if the given ALTER onlineDDL, and for the current state of affected table, // can be executed in a special way. 
If so, it returns with a "special plan" func (e *Executor) analyzeSpecialAlterPlan(ctx context.Context, onlineDDL *schema.OnlineDDL, capableOf mysql.CapableOf) (*SpecialAlterPlan, error) { - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/analysis_test.go b/go/vt/vttablet/onlineddl/analysis_test.go index afaa3e8aa1f..d1510cf1773 100644 --- a/go/vt/vttablet/onlineddl/analysis_test.go +++ b/go/vt/vttablet/onlineddl/analysis_test.go @@ -208,15 +208,16 @@ func TestAnalyzeInstantDDL(t *testing.T) { instant: false, }, } + parser := sqlparser.NewTestParser() for _, tc := range tt { name := tc.version + " " + tc.create t.Run(name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.create) + stmt, err := parser.ParseStrictDDL(tc.create) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(tc.alter) + stmt, err = parser.ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index e376b67ea4c..4d0ada6bfe2 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -304,7 +304,7 @@ func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, que defer conn.Recycle() // Replace any provided sidecar DB qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := e.env.SQLParser().ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -413,7 +413,7 @@ func (e *Executor) allowConcurrentMigration(onlineDDL *schema.OnlineDDL) (action } var err error - action, err = onlineDDL.GetAction() + action, err = onlineDDL.GetAction(e.env.SQLParser()) if err != nil { return action, false } @@ -800,7 +800,7 @@ func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, tableNa for _, row := range rs.Named().Rows { threadId := row.AsInt64("id", 0) infoQuery := row.AsString("info", "") - stmt, err := sqlparser.Parse(infoQuery) + stmt, err := e.env.SQLParser().Parse(infoQuery) if err != nil { log.Error(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery)) continue @@ -1383,7 +1383,7 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, onlineDDL *schema.O constraintMap map[string]string, err error, ) { - stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(originalShowCreateTable) if err != nil { return nil, nil, nil, err } @@ -1449,7 +1449,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return nil, err } - stmt, err := sqlparser.ParseStrictDDL(onlineDDL.SQL) + stmt, err := e.env.SQLParser().ParseStrictDDL(onlineDDL.SQL) if err != nil { return nil, err } @@ -1476,7 +1476,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag(), e.env.CollationEnv()) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, 
onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag(), e.env.CollationEnv(), e.env.SQLParser()) return v, nil } @@ -1530,7 +1530,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false, e.env.CollationEnv()) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false, e.env.CollationEnv(), e.env.SQLParser()) v.pos = revertStream.pos return v, nil } @@ -2400,7 +2400,7 @@ func (e *Executor) reviewEmptyTableRevertMigrations(ctx context.Context, onlineD // Try to update table name and ddl_action // Failure to do so fails the migration - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.SQLParser()) if err != nil { return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot analyze revert UUID for revert migration %s: %v", onlineDDL.UUID, err)) } @@ -2554,7 +2554,7 @@ func (e *Executor) reviewQueuedMigrations(ctx context.Context) error { func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigration *schema.OnlineDDL, revertingMigrationUUID string) (err error) { // Validation: migration to revert exists and is in complete state - action, actionStr, err := revertMigration.GetActionStr() + action, actionStr, err := revertMigration.GetActionStr(e.env.SQLParser()) if err != nil { return err } @@ -2623,7 +2623,7 @@ func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigrat // - what type of migration we're reverting? 
(CREATE/DROP/ALTER) // - revert appropriately to the type of migration func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.SQLParser()) if err != nil { return fmt.Errorf("cannot run a revert migration %v: %+v", onlineDDL.UUID, err) } @@ -2736,7 +2736,7 @@ func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDD func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schema.OnlineDDL) (diff schemadiff.EntityDiff, err error) { // Modify the CREATE TABLE statement to indicate a different, made up table name, known as the "comparison table" - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return nil, err } @@ -2793,9 +2793,9 @@ func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schem hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementApplyHigher} switch ddlStmt.(type) { case *sqlparser.CreateTable: - diff, err = schemadiff.DiffCreateTablesQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateTablesQueries(existingShowCreateTable, newShowCreateTable, hints, e.env.SQLParser()) case *sqlparser.CreateView: - diff, err = schemadiff.DiffCreateViewsQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateViewsQueries(existingShowCreateTable, newShowCreateTable, hints, e.env.SQLParser()) default: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expected CREATE TABLE or CREATE VIEW in online DDL statement: %v", onlineDDL.SQL) } @@ -2856,7 +2856,7 @@ func (e *Executor) analyzeDropDDLActionMigration(ctx context.Context, onlineDDL } } } - stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(originalShowCreateTable) if err != nil { return err } @@ -2902,7 +2902,7 @@ func (e *Executor) executeDropDDLActionMigration(ctx context.Context, onlineDDL // We transform a DROP TABLE into a RENAME TABLE statement, so as to remove the table safely and asynchronously. 
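[Editor's note, not part of the patch] In the online DDL executor the parser is no longer reached through package globals; it comes from the tablet environment via e.env.SQLParser(), as the repeated edits in this file show. A hedged sketch of the recurring call, assuming the usual (DDLStatement, DDLAction, error) return of schema.ParseOnlineDDLStatement and an invented helper name:

package onlineddl

import (
	"vitess.io/vitess/go/vt/schema"
	"vitess.io/vitess/go/vt/sqlparser"
)

// parseOnlineDDL centralizes the pattern used throughout this file: the SQL of a
// migration is parsed with the environment's injected parser.
func (e *Executor) parseOnlineDDL(onlineDDL *schema.OnlineDDL) (sqlparser.DDLStatement, error) {
	ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser())
	if err != nil {
		return nil, err
	}
	return ddlStmt, nil
}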
- ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -2945,7 +2945,7 @@ func (e *Executor) executeCreateDDLActionMigration(ctx context.Context, onlineDD e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3032,7 +3032,7 @@ func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema if err != nil { return err } - stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return err } @@ -3191,7 +3191,7 @@ func (e *Executor) executeAlterDDLActionMigration(ctx context.Context, onlineDDL failMigration := func(err error) error { return e.failMigration(ctx, onlineDDL, err) } - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3264,7 +3264,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin return e.failMigration(ctx, onlineDDL, err) } - ddlAction, err := onlineDDL.GetAction() + ddlAction, err := onlineDDL.GetAction(e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3298,7 +3298,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table does not exist { // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3325,7 +3325,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table exists and is identical to CREATE statement // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3466,7 +3466,7 @@ func (e *Executor) runNextMigration(ctx context.Context) error { } { // We strip out any VT query comments because our simplified parser doesn't work well with comments - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err == nil { ddlStmt.SetComments(sqlparser.Comments{}) onlineDDL.SQL = sqlparser.String(ddlStmt) @@ -4862,7 +4862,7 @@ func (e *Executor) submittedMigrationConflictsWithPendingMigrationInSingletonCon return false } // Let's see if the pending migration is a revert: - if _, err := pendingOnlineDDL.GetRevertUUID(); err != nil { + if _, err := pendingOnlineDDL.GetRevertUUID(e.env.SQLParser()); err != nil { // Not a revert. So the pending migration definitely conflicts with our migration. 
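[Editor's note, not part of the patch] The schema.OnlineDDL accessors that must parse the migration SQL (GetAction, GetActionStr, GetRevertUUID, IsView) likewise take the parser as an argument now. An illustrative call-site sketch; the helper name describeMigration is invented:

package onlineddl

import "vitess.io/vitess/go/vt/schema"

// describeMigration bundles the new calling convention: each accessor receives
// the environment's parser instead of consulting a package-level one.
func (e *Executor) describeMigration(onlineDDL *schema.OnlineDDL) (actionStr, revertedUUID string, isView bool, err error) {
	if _, actionStr, err = onlineDDL.GetActionStr(e.env.SQLParser()); err != nil {
		return "", "", false, err
	}
	revertedUUID, _ = onlineDDL.GetRevertUUID(e.env.SQLParser()) // empty when not a REVERT migration
	isView = onlineDDL.IsView(e.env.SQLParser())
	return actionStr, revertedUUID, isView, nil
}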
return true } @@ -4997,13 +4997,13 @@ func (e *Executor) SubmitMigration( // OK, this is a new UUID - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(e.env.SQLParser()) if err != nil { return nil, err } log.Infof("SubmitMigration: request to submit migration %s; action=%s, table=%s", onlineDDL.UUID, actionStr, onlineDDL.Table) - revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error. + revertedUUID, _ := onlineDDL.GetRevertUUID(e.env.SQLParser()) // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 { // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override! @@ -5029,7 +5029,7 @@ func (e *Executor) SubmitMigration( sqltypes.BoolBindVariable(onlineDDL.StrategySetting().IsPostponeCompletion()), sqltypes.BoolBindVariable(allowConcurrentMigration), sqltypes.StringBindVariable(revertedUUID), - sqltypes.BoolBindVariable(onlineDDL.IsView()), + sqltypes.BoolBindVariable(onlineDDL.IsView(e.env.SQLParser())), ) if err != nil { return nil, err diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go index 9e100fa43eb..fac0cf7efcf 100644 --- a/go/vt/vttablet/onlineddl/executor_test.go +++ b/go/vt/vttablet/onlineddl/executor_test.go @@ -28,6 +28,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" ) @@ -44,7 +47,9 @@ func TestGetConstraintType(t *testing.T) { } func TestValidateAndEditCreateTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "ValidateAndEditCreateTableStatementTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { name string query string @@ -156,7 +161,7 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.query) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.query) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -186,7 +191,9 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } func TestValidateAndEditAlterTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "TestValidateAndEditAlterTableStatementTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { alter string m map[string]string @@ -256,7 +263,7 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { } for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -268,7 +275,7 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "t", Options: "--unsafe-allow-foreign-keys"} alters, err := e.validateAndEditAlterTableStatement(context.Background(), onlineDDL, alterTable, m) assert.NoError(t, err) - altersStrings := []string{} + var altersStrings 
[]string for _, alter := range alters { altersStrings = append(altersStrings, sqlparser.String(alter)) } @@ -278,7 +285,9 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { } func TestAddInstantAlgorithm(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "AddInstantAlgorithmTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { alter string expect string @@ -302,7 +311,7 @@ func TestAddInstantAlgorithm(t *testing.T) { } for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -312,7 +321,7 @@ func TestAddInstantAlgorithm(t *testing.T) { assert.Equal(t, tc.expect, alterInstant) - stmt, err = sqlparser.ParseStrictDDL(alterInstant) + stmt, err = e.env.SQLParser().ParseStrictDDL(alterInstant) require.NoError(t, err) _, ok = stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -321,7 +330,9 @@ func TestAddInstantAlgorithm(t *testing.T) { } func TestDuplicateCreateTable(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "DuplicateCreateTableTest", collations.MySQL8(), sqlparser.NewTestParser()), + } ctx := context.Background() onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "something", Strategy: "vitess", Options: "--unsafe-allow-foreign-keys"} diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index 34eefbbfd0d..1f9b422563d 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -138,6 +138,7 @@ type VRepl struct { convertCharset map[string](*binlogdatapb.CharsetConversion) collationEnv *collations.Environment + sqlparser *sqlparser.Parser } // NewVRepl creates a VReplication handler for Online DDL @@ -152,6 +153,7 @@ func NewVRepl(workflow string, alterQuery string, analyzeTable bool, collationEnv *collations.Environment, + parser *sqlparser.Parser, ) *VRepl { return &VRepl{ workflow: workflow, @@ -169,6 +171,7 @@ func NewVRepl(workflow string, intToEnumMap: map[string]bool{}, convertCharset: map[string](*binlogdatapb.CharsetConversion){}, collationEnv: collationEnv, + sqlparser: parser, } } @@ -388,7 +391,7 @@ func (v *VRepl) analyzeAlter(ctx context.Context) error { // Happens for REVERT return nil } - if err := v.parser.ParseAlterStatement(v.alterQuery); err != nil { + if err := v.parser.ParseAlterStatement(v.alterQuery, v.sqlparser); err != nil { return err } if v.parser.IsRenameTable() { @@ -459,7 +462,7 @@ func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection } v.addedUniqueKeys = vrepl.AddedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) v.removedUniqueKeys = vrepl.RemovedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) - v.removedForeignKeyNames, err = vrepl.RemovedForeignKeyNames(v.originalShowCreateTable, v.vreplShowCreateTable) + v.removedForeignKeyNames, err = vrepl.RemovedForeignKeyNames(v.sqlparser, v.originalShowCreateTable, v.vreplShowCreateTable) if err != nil { return err } diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go index f0925594ec0..26a46879f79 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go +++ b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go @@ -27,6 +27,7 @@ import ( // RemovedForeignKeyNames returns the names of removed 
foreign keys, ignoring mere name changes func RemovedForeignKeyNames( + parser *sqlparser.Parser, originalCreateTable string, vreplCreateTable string, ) (names []string, err error) { @@ -34,7 +35,7 @@ func RemovedForeignKeyNames( return nil, nil } diffHints := schemadiff.DiffHints{ConstraintNamesStrategy: schemadiff.ConstraintNamesIgnoreAll} - diff, err := schemadiff.DiffCreateTablesQueries(originalCreateTable, vreplCreateTable, &diffHints) + diff, err := schemadiff.DiffCreateTablesQueries(originalCreateTable, vreplCreateTable, &diffHints, parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go index 619ba4847d9..7b8cf0e7363 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go @@ -24,6 +24,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestRemovedForeignKeyNames(t *testing.T) { @@ -66,7 +68,7 @@ func TestRemovedForeignKeyNames(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.before, func(t *testing.T) { - names, err := RemovedForeignKeyNames(tcase.before, tcase.after) + names, err := RemovedForeignKeyNames(sqlparser.NewTestParser(), tcase.before, tcase.after) assert.NoError(t, err) assert.Equal(t, tcase.names, names) }) diff --git a/go/vt/vttablet/onlineddl/vrepl/parser.go b/go/vt/vttablet/onlineddl/vrepl/parser.go index f1f2f1378d8..b5648adeabe 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser.go @@ -78,8 +78,8 @@ func (p *AlterTableParser) analyzeAlter(alterTable *sqlparser.AlterTable) { } // ParseAlterStatement is the main function of th eparser, and parses an ALTER TABLE statement -func (p *AlterTableParser) ParseAlterStatement(alterQuery string) (err error) { - stmt, err := sqlparser.ParseStrictDDL(alterQuery) +func (p *AlterTableParser) ParseAlterStatement(alterQuery string, parser *sqlparser.Parser) (err error) { + stmt, err := parser.ParseStrictDDL(alterQuery) if err != nil { return err } diff --git a/go/vt/vttablet/onlineddl/vrepl/parser_test.go b/go/vt/vttablet/onlineddl/vrepl/parser_test.go index f849b1d741d..2a7031f3a98 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser_test.go @@ -24,12 +24,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestParseAlterStatement(t *testing.T) { statement := "alter table t add column t int, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -38,7 +40,7 @@ func TestParseAlterStatement(t *testing.T) { func TestParseAlterStatementTrivialRename(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -66,7 +68,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { for _, statement := range statements { parser := NewAlterTableParser() statement := "alter table t " + statement - err := 
parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.True(t, parser.IsAutoIncrementDefined()) } @@ -75,7 +77,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { func TestParseAlterStatementTrivialRenames(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -98,7 +100,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) { for _, statement := range statements { statement := "alter table t " + statement parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.IsAutoIncrementDefined()) renames := parser.GetNonTrivialRenames() @@ -113,7 +115,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 1) assert.True(t, parser.droppedColumns["b"]) @@ -121,7 +123,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 2) assert.True(t, parser.droppedColumns["b"]) @@ -130,7 +132,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 3) assert.True(t, parser.droppedColumns["b"]) @@ -140,7 +142,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop bad statement, add column i int" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.Error(t, err) } } @@ -177,7 +179,7 @@ func TestParseAlterStatementRenameTable(t *testing.T) { for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { parser := NewAlterTableParser() - err := parser.ParseAlterStatement(tc.alter) + err := parser.ParseAlterStatement(tc.alter, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tc.isRename, parser.isRenameTable) }) diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 55a635984ec..ad9c1b3702f 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -128,6 +128,8 @@ type SandboxConn struct { NotServing bool getSchemaResult []map[string]string + + parser *sqlparser.Parser } var _ queryservice.QueryService = (*SandboxConn)(nil) // compile-time 
interface check @@ -139,6 +141,7 @@ func NewSandboxConn(t *topodatapb.Tablet) *SandboxConn { MustFailCodes: make(map[vtrpcpb.Code]int), MustFailExecute: make(map[sqlparser.StatementType]int), txIDToRID: make(map[int64]int64), + parser: sqlparser.NewTestParser(), } } @@ -225,7 +228,7 @@ func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, que return nil, err } - stmt, _ := sqlparser.Parse(query) // knowingly ignoring the error + stmt, _ := sbc.parser.Parse(query) // knowingly ignoring the error if sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] > 0 { sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] = sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] - 1 return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "failed query: %v", query) @@ -251,7 +254,7 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe sbc.sExecMu.Unlock() return err } - parse, _ := sqlparser.Parse(query) + parse, _ := sbc.parser.Parse(query) if sbc.results == nil { nextRs := sbc.getNextResult(parse) diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index 97c1a910af0..22d0abbc9e5 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -426,7 +426,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati Port: connParams.Port, } dbCfgs.SetDbParams(*connParams, *connParams, *connParams) - vsClient := vreplication.NewReplicaConnector(connParams, tm.CollationEnv) + vsClient := vreplication.NewReplicaConnector(connParams, tm.CollationEnv, tm.SQLParser) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index 8b8ac605893..4a2da2bf310 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -54,7 +54,7 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag // Handle special possible directives var directives *sqlparser.CommentDirectives - if stmt, err := sqlparser.Parse(string(req.Query)); err == nil { + if stmt, err := tm.SQLParser.Parse(string(req.Query)); err == nil { if cmnt, ok := stmt.(sqlparser.Commented); ok { directives = cmnt.GetParsedComments().Directives() } @@ -66,7 +66,7 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -131,7 +131,7 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag } defer conn.Recycle() // Replace any provided sidecar database qualifiers with the correct one. 
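For illustration only, and not part of this patch: the calling pattern the sandboxconn and tablet-manager hunks above converge on. Query helpers are invoked on an injected *sqlparser.Parser (sbc.parser, tm.SQLParser) instead of the old package-level sqlparser functions; the helper name and the fromDB/toDB parameters below are hypothetical.

package example

import "vitess.io/vitess/go/vt/sqlparser"

// rewriteAndCheck mirrors the new call shape: both qualifier replacement and
// parsing go through the same injected parser instance.
func rewriteAndCheck(parser *sqlparser.Parser, query, fromDB, toDB string) (string, error) {
	uq, err := parser.ReplaceTableQualifiers(query, fromDB, toDB)
	if err != nil {
		return "", err
	}
	if _, err := parser.Parse(uq); err != nil {
		return "", err
	}
	return uq, nil
}

In tests, sqlparser.NewTestParser() stands in for the parser that production code receives through its environment.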
- uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -145,7 +145,7 @@ func (tm *TabletManager) ExecuteQuery(ctx context.Context, req *tabletmanagerdat tablet := tm.Tablet() target := &querypb.Target{Keyspace: tablet.Keyspace, Shard: tablet.Shard, TabletType: tablet.Type} // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index d81d2a6e6a4..60881b4eab3 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -329,7 +329,7 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta // VReplicationExec executes a vreplication command. func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index a70220a68fc..d062183e8c5 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -111,7 +111,7 @@ func TestCreateVReplicationWorkflow(t *testing.T) { targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) defer tenv.deleteTablet(targetTablet.tablet) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tests := []struct { name string @@ -268,7 +268,7 @@ func TestMoveTables(t *testing.T) { }, }) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tenv.mysqld.Schema = defaultSchema tenv.mysqld.Schema.DatabaseSchema = tenv.dbName @@ -656,7 +656,7 @@ func TestSourceShardSelection(t *testing.T) { defer tenv.deleteTablet(tt.tablet) } - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ Sharded: true, @@ -855,7 +855,7 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { sourceKs, shard, table, table) tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) defer tenv.close() - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) defer tenv.deleteTablet(sourceTablet.tablet) diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 143638e994b..1910050e802 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -63,6 +63,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" 
"vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -156,6 +157,7 @@ type TabletManager struct { VREngine *vreplication.Engine VDiffEngine *vdiff.Engine CollationEnv *collations.Environment + SQLParser *sqlparser.Parser // tmState manages the TabletManager state. tmState *tmState diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index 541b8d018fc..16e8a89d90e 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -37,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) type Engine struct { @@ -72,15 +71,17 @@ type Engine struct { fortests bool collationEnv *collations.Environment + parser *sqlparser.Parser } -func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, tablet *topodata.Tablet, collationEnv *collations.Environment) *Engine { +func NewEngine(ts *topo.Server, tablet *topodata.Tablet, collationEnv *collations.Environment, parser *sqlparser.Parser) *Engine { vde := &Engine{ controllers: make(map[int64]*controller), ts: ts, thisTablet: tablet, tmClientFactory: func() tmclient.TabletManagerClient { return tmclient.NewTabletManagerClient() }, collationEnv: collationEnv, + parser: parser, } return vde } @@ -99,6 +100,7 @@ func NewTestEngine(ts *topo.Server, tablet *topodata.Tablet, dbn string, dbcf fu tmClientFactory: tmcf, fortests: true, collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vde } @@ -109,10 +111,10 @@ func (vde *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { return } vde.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vde.parser) } vde.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vde.parser) } vde.dbName = dbcfgs.DBName } diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go index d0b81179f0f..a75349817e1 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go @@ -397,7 +397,7 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } func (dbc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/report.go b/go/vt/vttablet/tabletmanager/vdiff/report.go index f61929ea32c..62ce6d24585 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/report.go +++ b/go/vt/vttablet/tabletmanager/vdiff/report.go @@ -66,7 +66,7 @@ type RowDiff struct { func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]string) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.wd.ct.vde.parser.Parse(queryStmt) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go 
b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index f71a36a4fe2..2211f8fbc45 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -64,7 +64,7 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str table: td.table, dbName: dbName, } - statement, err := sqlparser.Parse(td.sourceQuery) + statement, err := td.wd.ct.vde.parser.Parse(td.sourceQuery) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index b168625d20a..4e8e827145a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -51,8 +51,8 @@ const ( ) // buildControllerPlan parses the input query and returns an appropriate plan. -func buildControllerPlan(query string) (*controllerPlan, error) { - stmt, err := sqlparser.Parse(query) +func buildControllerPlan(query string, parser *sqlparser.Parser) (*controllerPlan, error) { + stmt, err := parser.Parse(query) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 391b8d9c67e..275fb7fc455 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testControllerPlan struct { @@ -240,7 +242,7 @@ func TestControllerPlan(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - pl, err := buildControllerPlan(tcase.in) + pl, err := buildControllerPlan(tcase.in, sqlparser.NewTestParser()) if tcase.err != "" { require.EqualError(t, err, tcase.err) return diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 4a0be0b83ff..1f8f2236e6a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -38,6 +38,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -112,6 +113,7 @@ type Engine struct { shortcircuit bool collationEnv *collations.Environment + parser *sqlparser.Parser } type journalEvent struct { @@ -128,16 +130,17 @@ type PostCopyAction struct { // NewEngine creates a new Engine. // A nil ts means that the Engine is disabled. 
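A wiring sketch, not part of this patch, of the constructor shapes once both engines receive the parser explicitly. Everything except collations.MySQL8() and sqlparser.NewTestParser() is assumed to come from the caller's existing tablet-manager setup, and those test-flavored values merely stand in for whatever that setup provides.

package example

import (
	"vitess.io/vitess/go/mysql/collations"
	"vitess.io/vitess/go/vt/mysqlctl"
	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/topo"
	"vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
	"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
	"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
	"vitess.io/vitess/go/vt/vttablet/tabletserver/throttle"
)

// wireEngines passes one shared collation environment and parser to both engines.
func wireEngines(
	config *tabletenv.TabletConfig,
	ts *topo.Server,
	cell string,
	mysqld mysqlctl.MysqlDaemon,
	lagThrottler *throttle.Throttler,
	tablet *topodatapb.Tablet,
) (*vreplication.Engine, *vdiff.Engine) {
	collationEnv := collations.MySQL8()
	parser := sqlparser.NewTestParser()
	vre := vreplication.NewEngine(config, ts, cell, mysqld, lagThrottler, collationEnv, parser)
	vde := vdiff.NewEngine(ts, tablet, collationEnv, parser)
	return vre, vde
}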
-func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler, collationEnv *collations.Environment) *Engine { +func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler, collationEnv *collations.Environment, parser *sqlparser.Parser) *Engine { vre := &Engine{ controllers: make(map[int32]*controller), ts: ts, cell: cell, mysqld: mysqld, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(config.ExternalConnections, collationEnv), + ec: newExternalConnector(config.ExternalConnections, collationEnv, parser), throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerapp.VReplicationName, throttle.ThrottleCheckPrimaryWrite), collationEnv: collationEnv, + parser: parser, } return vre @@ -150,10 +153,10 @@ func (vre *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { return } vre.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vre.parser) } vre.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vre.parser) } vre.dbName = dbcfgs.DBName } @@ -169,8 +172,9 @@ func NewTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, db dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig, collations.MySQL8()), + ec: newExternalConnector(externalConfig, collations.MySQL8(), sqlparser.NewTestParser()), collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vre } @@ -187,9 +191,10 @@ func NewSimpleTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaem dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig, collations.MySQL8()), + ec: newExternalConnector(externalConfig, collations.MySQL8(), sqlparser.NewTestParser()), shortcircuit: true, collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vre } @@ -366,7 +371,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) } defer vre.updateStats() - plan, err := buildControllerPlan(query) + plan, err := buildControllerPlan(query, vre.parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index a0a230dbe28..a3974f70b90 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -28,6 +28,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -62,13 +63,15 @@ type externalConnector struct { dbconfigs map[string]*dbconfigs.DBConfigs connectors map[string]*mysqlConnector collationEnv *collations.Environment + parser *sqlparser.Parser } -func newExternalConnector(dbcfgs map[string]*dbconfigs.DBConfigs, collationEnv *collations.Environment) *externalConnector { +func newExternalConnector(dbcfgs 
map[string]*dbconfigs.DBConfigs, collationEnv *collations.Environment, parser *sqlparser.Parser) *externalConnector { return &externalConnector{ dbconfigs: dbcfgs, connectors: make(map[string]*mysqlConnector), collationEnv: collationEnv, + parser: parser, } } @@ -93,7 +96,7 @@ func (ec *externalConnector) Get(name string) (*mysqlConnector, error) { return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "external mysqlConnector %v not found", name) } c := &mysqlConnector{} - c.env = tabletenv.NewEnv(config, name, ec.collationEnv) + c.env = tabletenv.NewEnv(config, name, ec.collationEnv, ec.parser) c.se = schema.NewEngine(c.env) c.vstreamer = vstreamer.NewEngine(c.env, nil, c.se, nil, "") c.vstreamer.InitDBConfig("", "") diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 64a924f28d3..3d811c65914 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -32,6 +32,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" @@ -44,7 +46,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -495,7 +496,7 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } func (dc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go index 10644d898e2..a1b38eb07ae 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go @@ -20,6 +20,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -35,7 +36,7 @@ import ( // This is used by binlog server to make vstream connection // using the vstream connection, it will parse the events from binglog // to fetch the corresponding GTID for required recovery time -func NewReplicaConnector(connParams *mysql.ConnParams, collationEnv *collations.Environment) *ReplicaConnector { +func NewReplicaConnector(connParams *mysql.ConnParams, collationEnv *collations.Environment, parser *sqlparser.Parser) *ReplicaConnector { // Construct config := tabletenv.NewDefaultConfig() @@ -46,7 +47,7 @@ func NewReplicaConnector(connParams *mysql.ConnParams, collationEnv *collations. 
dbCfg.SetDbParams(*connParams, *connParams, *connParams) config.DB = dbCfg c := &ReplicaConnector{conn: connParams} - env := tabletenv.NewEnv(config, "source", collationEnv) + env := tabletenv.NewEnv(config, "source", collationEnv, parser) c.se = schema.NewEngine(env) c.se.SkipMetaCheck = true c.vstreamer = vstreamer.NewEngine(env, nil, c.se, nil, "") diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go index d2590975bb6..a328249d0e0 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -258,7 +258,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R if i > 0 { sqlbuffer.WriteString(", ") } - if err := tp.BulkInsertValues.AppendFromRow(sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { + if err := appendFromRow(tp.BulkInsertValues, sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { return nil, err } } @@ -607,3 +607,74 @@ func valsEqual(v1, v2 sqltypes.Value) bool { // Compare content only if none are null. return v1.ToString() == v2.ToString() } + +// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that +// the fields in the row are in the same order as the placeholders in this query. The fields might include generated +// columns which are dropped, by checking against skipFields, before binding the variables +// note: there can be more fields than bind locations since extra columns might be requested from the source if not all +// primary keys columns are present in the target table, for example. Also some values in the row may not correspond for +// values from the database on the source: sum/count for aggregation queries, for example +func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { + bindLocations := pq.BindLocations() + if len(fields) < len(bindLocations) { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", + len(fields), len(bindLocations)) + } + + type colInfo struct { + typ querypb.Type + length int64 + offset int64 + } + rowInfo := make([]*colInfo, 0) + + offset := int64(0) + for i, field := range fields { // collect info required for fields to be bound + length := row.Lengths[i] + if !skipFields[strings.ToLower(field.Name)] { + rowInfo = append(rowInfo, &colInfo{ + typ: field.Type, + length: length, + offset: offset, + }) + } + if length > 0 { + offset += row.Lengths[i] + } + } + + // bind field values to locations + var offsetQuery int + for i, loc := range bindLocations { + col := rowInfo[i] + buf.WriteString(pq.Query[offsetQuery:loc.Offset]) + typ := col.typ + + switch typ { + case querypb.Type_TUPLE: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) + case querypb.Type_JSON: + if col.length < 0 { // An SQL NULL and not an actual JSON value + buf.WriteString(sqltypes.NullStr) + } else { // A JSON value (which may be a JSON null literal value) + buf2 := row.Values[col.offset : col.offset+col.length] + vv, err := vjson.MarshalSQLValue(buf2) + if err != nil { + return err + } + buf.WriteString(vv.RawStr()) + } + default: + if col.length < 0 { + // -1 means a null variable; serialize it directly + buf.WriteString(sqltypes.NullStr) + } else { + vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) + 
vv.EncodeSQLBytes2(buf) + } + } + offsetQuery = loc.Offset + loc.Length + } + buf.WriteString(pq.Query[offsetQuery:]) + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go index ce8dc61fd38..5dce71cf0f5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" @@ -240,7 +241,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: "(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3) on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1", Delete: "update t1 set c2=null where c1=:b_c1", @@ -262,7 +263,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1", "pk1", "pk2"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: "(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 from dual where (:a_pk1,:a_pk2) <= (1,'aaa') on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", Delete: "update t1 set c2=null where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", @@ -734,7 +735,7 @@ func TestBuildPlayerPlan(t *testing.T) { } for _, tcase := range testcases { - plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8()) + plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) gotPlan, _ := json.Marshal(plan) wantPlan, _ := json.Marshal(tcase.plan) if string(gotPlan) != string(wantPlan) { @@ -748,7 +749,7 @@ func TestBuildPlayerPlan(t *testing.T) { t.Errorf("Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) } - plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats(), collations.MySQL8()) + plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) if err != nil { continue } @@ -778,7 +779,7 @@ func TestBuildPlayerPlanNoDup(t *testing.T) { Filter: "select * from t", }}, } - _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8()) + _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) want := "more than one target for source table t" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("buildReplicatorPlan err: %v, must contain: %v", err, want) @@ -799,7 +800,7 @@ func TestBuildPlayerPlanExclude(t *testing.T) { Filter: "", }}, } - plan, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8()) + plan, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), 
sqlparser.NewTestParser()) assert.NoError(t, err) want := &TestReplicatorPlan{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index 69e712e9655..0f94b6b13d2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -132,7 +132,7 @@ const ( // The TablePlan built is a partial plan. The full plan for a table is built // when we receive field information from events or rows sent by the source. // buildExecutionPlan is the function that builds the full plan. -func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats, collationEnv *collations.Environment) (*ReplicatorPlan, error) { +func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats, collationEnv *collations.Environment, parser *sqlparser.Parser) (*ReplicatorPlan, error) { filter := source.Filter plan := &ReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{FieldEventMode: filter.FieldEventMode}, @@ -160,7 +160,7 @@ func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[strin if !ok { return nil, fmt.Errorf("table %s not found in schema", tableName) } - tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source, collationEnv) + tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source, collationEnv, parser) if err != nil { return nil, err } @@ -200,7 +200,7 @@ func MatchTable(tableName string, filter *binlogdatapb.Filter) (*binlogdatapb.Ru } func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*ColumnInfo, lastpk *sqltypes.Result, - stats *binlogplayer.Stats, source *binlogdatapb.BinlogSource, collationEnv *collations.Environment) (*TablePlan, error) { + stats *binlogplayer.Stats, source *binlogdatapb.BinlogSource, collationEnv *collations.Environment, parser *sqlparser.Parser) (*TablePlan, error) { filter := rule.Filter query := filter @@ -217,7 +217,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum case filter == ExcludeStr: return nil, nil } - sel, fromTable, err := analyzeSelectFrom(query) + sel, fromTable, err := analyzeSelectFrom(query, parser) if err != nil { return nil, err } @@ -381,8 +381,8 @@ func (tpb *tablePlanBuilder) generate() *TablePlan { } } -func analyzeSelectFrom(query string) (sel *sqlparser.Select, from string, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelectFrom(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, from string, err error) { + statement, err := parser.Parse(query) if err != nil { return nil, "", err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index 01b11a26a1e..3f4a5f2710e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -219,7 +219,7 @@ func newVCopierCopyWorker( func (vc *vcopier) initTablesForCopy(ctx context.Context) error { defer vc.vr.dbClient.Rollback() - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err 
!= nil { return err } @@ -385,7 +385,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName]) - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go index 667ea9615f3..d0adc970382 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -54,7 +54,7 @@ func newCopyAllState(vc *vcopier) (*copyAllState, error) { state := ©AllState{ vc: vc, } - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index e15b1f12be4..f1265a1dd68 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -180,7 +180,7 @@ func (vp *vplayer) play(ctx context.Context) error { return nil } - plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats, vp.vr.vre.collationEnv) + plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats, vp.vr.vre.collationEnv, vp.vr.vre.parser) if err != nil { vp.vr.stats.ErrorCounts.Add([]string{"Plan"}, 1) return err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 9c065866c15..20d2ab4a59d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -726,7 +726,7 @@ func (vr *vreplicator) getTableSecondaryKeys(ctx context.Context, tableName stri } tableSchema := schema.TableDefinitions[0].Schema var secondaryKeys []*sqlparser.IndexDefinition - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := vr.vre.parser.ParseStrictDDL(tableSchema) if err != nil { return secondaryKeys, err } @@ -973,7 +973,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // the table schema and if so move forward and delete the // post_copy_action record. 
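Not part of this patch, a minimal sketch of the strict-DDL path the vreplicator hunks above now use: the injected parser instance replaces the package-level sqlparser.ParseStrictDDL, and the caller type-asserts the result as before. The helper name is hypothetical.

package example

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

// parseCreateTable rejects anything the parser cannot fully understand,
// which is the behavior the vreplicator relies on when inspecting table schemas.
func parseCreateTable(parser *sqlparser.Parser, ddl string) (*sqlparser.CreateTable, error) {
	stmt, err := parser.ParseStrictDDL(ddl)
	if err != nil {
		return nil, err
	}
	createTable, ok := stmt.(*sqlparser.CreateTable)
	if !ok {
		return nil, fmt.Errorf("expected CREATE TABLE, got %T", stmt)
	}
	return createTable, nil
}

A caller would pass vr.vre.parser in production code, or sqlparser.NewTestParser() in tests, as the first argument.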
if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERDupKeyName { - stmt, err := sqlparser.ParseStrictDDL(action.Task) + stmt, err := vr.vre.parser.ParseStrictDDL(action.Task) if err != nil { return failedAlterErr } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index dd4b9dc70f8..3a5c0578661 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/schemadiff" + "vitess.io/vitess/go/vt/sqlparser" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -549,7 +550,7 @@ func TestDeferSecondaryKeys(t *testing.T) { // order in the table schema. if !tcase.expectFinalSchemaDiff { currentDDL := getCurrentDDL(tcase.tableName) - sdiff, err := schemadiff.DiffCreateTablesQueries(currentDDL, tcase.initialDDL, diffHints) + sdiff, err := schemadiff.DiffCreateTablesQueries(currentDDL, tcase.initialDDL, diffHints, sqlparser.NewTestParser()) require.NoError(t, err) require.Nil(t, sdiff, "Expected no schema difference but got: %s", sdiff.CanonicalStatementString()) } diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 63f4c73520e..7876cdf00db 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -81,7 +81,7 @@ func newPooledConn(ctx context.Context, pool *Pool, appParams dbconfigs.Connecto } // NewConn creates a new Conn without a pool. -func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting) (*Conn, error) { +func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting, env tabletenv.Env) (*Conn, error) { c, err := dbconnpool.NewDBConnection(ctx, params) if err != nil { return nil, err @@ -90,6 +90,7 @@ func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpoo conn: c, dbaPool: dbaPool, stats: tabletenv.NewStats(servenv.NewExporter("Temp", "Tablet")), + env: env, } dbconn.current.Store("") if setting == nil { @@ -483,9 +484,9 @@ func (dbc *Conn) CurrentForLogging() string { if dbc.env != nil && dbc.env.Config() != nil && !dbc.env.Config().SanitizeLogMessages { queryToLog = dbc.Current() } else { - queryToLog, _ = sqlparser.RedactSQLQuery(dbc.Current()) + queryToLog, _ = dbc.env.SQLParser().RedactSQLQuery(dbc.Current()) } - return sqlparser.TruncateForLog(queryToLog) + return dbc.env.SQLParser().TruncateForLog(queryToLog) } func (dbc *Conn) applySameSetting(ctx context.Context) (err error) { diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index 33c37d2b2c6..3687ed00c4f 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -27,12 +27,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" + 
"vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) func compareTimingCounts(t *testing.T, op string, delta int64, before, after map[string]int64) { @@ -330,7 +333,7 @@ func TestDBNoPoolConnKill(t *testing.T) { params := dbconfigs.New(db.ConnParams()) connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := NewConn(context.Background(), params, connPool.dbaPool, nil) + dbConn, err := NewConn(context.Background(), params, connPool.dbaPool, nil, tabletenv.NewEnv(nil, "TestDBNoPoolConnKill", collations.MySQL8(), sqlparser.NewTestParser())) if dbConn != nil { defer dbConn.Close() } diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index a8eb2c52d83..567745e37b5 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -126,7 +126,7 @@ func (cp *Pool) Get(ctx context.Context, setting *smartconnpool.Setting) (*Poole defer span.Finish() if cp.isCallerIDAppDebug(ctx) { - conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting) + conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting, cp.env) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index f326392b83e..f4e6d6fa008 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -59,7 +60,7 @@ func TestConnPoolTimeout(t *testing.T) { } cfg.Timeout = time.Second cfg.IdleTimeout = 10 * time.Second - connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8()), "TestPool", cfg) + connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8(), sqlparser.NewTestParser()), "TestPool", cfg) params := dbconfigs.New(db.ConnParams()) connPool.Open(params, params, params) defer connPool.Close() @@ -336,7 +337,7 @@ func newPool() *Pool { } func newPoolWithCapacity(capacity int) *Pool { - return NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8()), "TestPool", tabletenv.ConnPoolConfig{ + return NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8(), sqlparser.NewTestParser()), "TestPool", tabletenv.ConnPoolConfig{ Size: capacity, IdleTimeout: 10 * time.Second, }) diff --git a/go/vt/vttablet/tabletserver/exclude_race_test.go b/go/vt/vttablet/tabletserver/exclude_race_test.go index 6e55671ac96..ee4364968c3 100644 --- a/go/vt/vttablet/tabletserver/exclude_race_test.go +++ b/go/vt/vttablet/tabletserver/exclude_race_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" @@ -26,7 +27,13 @@ func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { tl := newTestLogger() defer tl.Close() logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") - db, tsv := setupTabletServerTest(t, ctx, "") + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) + + db, tsv := setupTabletServerTestCustom(t, ctx, tabletenv.NewDefaultConfig(), 
"", parser) defer tsv.StopService() defer db.Close() @@ -37,9 +44,6 @@ func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3333333333), "bv4": sqltypes.Int64BindVariable(4444444444), } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) defer func() { err := logStats.Error diff --git a/go/vt/vttablet/tabletserver/fuzz.go b/go/vt/vttablet/tabletserver/fuzz.go index 6cb0f60ff93..c7f3dabde97 100644 --- a/go/vt/vttablet/tabletserver/fuzz.go +++ b/go/vt/vttablet/tabletserver/fuzz.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -58,7 +59,7 @@ func FuzzGetPlan(data []byte) int { // Set up the environment config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) defer qe.Close() diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index c43e9598d89..1a7a1392efb 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -46,7 +46,7 @@ func TestHealthStreamerClosed(t *testing.T) { db := fakesqldb.New(t) defer db.Close() config := newConfig(db) - env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -73,7 +73,7 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { config := newConfig(db) config.SignalWhenSchemaChange = true - env := tabletenv.NewEnv(config, "TestNotServingPrimary", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TestNotServingPrimary", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -104,7 +104,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { config := newConfig(db) config.SignalWhenSchemaChange = false - env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -219,7 +219,7 @@ func TestReloadSchema(t *testing.T) { config.SignalWhenSchemaChange = testcase.enableSchemaChange config.SchemaReloadInterval = 100 * time.Millisecond - env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -337,7 +337,7 @@ func TestReloadView(t *testing.T) { config.SchemaReloadInterval = 100 * time.Millisecond config.EnableViews = true - env := tabletenv.NewEnv(config, "TestReloadView", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TestReloadView", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{Cell: "cell", Uid: 1} se := schema.NewEngine(env) hs := newHealthStreamer(env, alias, se) diff --git 
a/go/vt/vttablet/tabletserver/livequeryz_test.go b/go/vt/vttablet/tabletserver/livequeryz_test.go index 18e62047226..e507f365afb 100644 --- a/go/vt/vttablet/tabletserver/livequeryz_test.go +++ b/go/vt/vttablet/tabletserver/livequeryz_test.go @@ -17,18 +17,19 @@ limitations under the License. package tabletserver import ( + "context" "net/http" "net/http/httptest" "testing" - "context" + "vitess.io/vitess/go/vt/sqlparser" ) func TestLiveQueryzHandlerJSON(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/?format=json", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) @@ -39,7 +40,7 @@ func TestLiveQueryzHandlerHTTP(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) @@ -50,7 +51,7 @@ func TestLiveQueryzHandlerHTTPFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz/", nil) - livequeryzHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -61,7 +62,7 @@ func TestLiveQueryzHandlerTerminateConn(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=1", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) testConn := &testConn{id: 1} queryList.Add(NewQueryDetail(context.Background(), testConn)) if testConn.IsKilled() { @@ -77,7 +78,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidConnID(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=invalid", nil) - livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -88,7 +89,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz//terminate?inva+lid=2", nil) - livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) diff --git a/go/vt/vttablet/tabletserver/messager/engine_test.go b/go/vt/vttablet/tabletserver/messager/engine_test.go index e85abe8b46d..ac817918f48 100644 --- a/go/vt/vttablet/tabletserver/messager/engine_test.go +++ b/go/vt/vttablet/tabletserver/messager/engine_test.go @@ -154,7 +154,7 @@ func TestEngineGenerate(t *testing.T) { func newTestEngine() *Engine { config := tabletenv.NewDefaultConfig() 
tsv := &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8()), + Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8(), sqlparser.NewTestParser()), } se := schema.NewEngine(tsv) te := NewEngine(tsv, se, newFakeVStreamer()) diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index b4154a21f8d..95bd1fb2b01 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -834,7 +834,7 @@ type fakeTabletServer struct { func newFakeTabletServer() *fakeTabletServer { config := tabletenv.NewDefaultConfig() return &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8()), + Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8(), sqlparser.NewTestParser()), } } diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go index 17baa72595e..aac0ed1f64a 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go @@ -178,7 +178,7 @@ func TestBuildPermissions(t *testing.T) { }} for _, tcase := range tcases { - stmt, err := sqlparser.Parse(tcase.input) + stmt, err := sqlparser.NewTestParser().Parse(tcase.input) if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index 97962be553e..5d05159b185 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -323,7 +323,7 @@ func hasLockFunc(sel *sqlparser.Select) bool { } // BuildSettingQuery builds a query for system settings. 
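A sketch, not part of this patch, of the environment construction the test hunks above repeat: tabletenv.NewEnv now takes the collation environment and the parser, and downstream code reaches the parser through env.SQLParser() instead of package globals. The exporter name and query handling are placeholders.

package example

import (
	"vitess.io/vitess/go/mysql/collations"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
)

func newExampleEnv() tabletenv.Env {
	cfg := tabletenv.NewDefaultConfig()
	return tabletenv.NewEnv(cfg, "ExampleTest", collations.MySQL8(), sqlparser.NewTestParser())
}

// redactForLog shows the env-scoped parser being used for log sanitization,
// matching the CurrentForLogging change in connpool/dbconn.go above.
func redactForLog(env tabletenv.Env, query string) string {
	redacted, _ := env.SQLParser().RedactSQLQuery(query)
	return env.SQLParser().TruncateForLog(redacted)
}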
-func BuildSettingQuery(settings []string) (query string, resetQuery string, err error) { +func BuildSettingQuery(settings []string, parser *sqlparser.Parser) (query string, resetQuery string, err error) { if len(settings) == 0 { return "", "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: plan called for empty system settings") } @@ -331,7 +331,7 @@ func BuildSettingQuery(settings []string) (query string, resetQuery string, err var resetSetExprs sqlparser.SetExprs lDefault := sqlparser.NewStrLiteral("default") for _, setting := range settings { - stmt, err := sqlparser.Parse(setting) + stmt, err := parser.Parse(setting) if err != nil { return "", "", vterrors.Wrapf(err, "[BUG]: failed to parse system setting: %s", setting) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index 195a8037210..76a4c269dd4 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -74,6 +74,7 @@ func TestDDLPlan(t *testing.T) { func testPlan(t *testing.T, fileName string) { t.Helper() + parser := sqlparser.NewTestParser() testSchema := loadSchema("schema_test.json") for tcase := range iterateExecFile(fileName) { t.Run(tcase.input, func(t *testing.T) { @@ -82,7 +83,7 @@ func testPlan(t *testing.T, fileName string) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } @@ -112,6 +113,7 @@ func testPlan(t *testing.T, fileName string) { func TestPlanInReservedConn(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("exec_cases.txt") { t.Run(tcase.input, func(t *testing.T) { if strings.Contains(tcase.options, "PassthroughDMLs") { @@ -119,7 +121,7 @@ func TestPlanInReservedConn(t *testing.T) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } @@ -155,6 +157,7 @@ func TestCustom(t *testing.T) { t.Log("No schemas to test") return } + parser := sqlparser.NewTestParser() for _, schemFile := range testSchemas { schem := loadSchema(schemFile) t.Logf("Testing schema %s", schemFile) @@ -168,7 +171,7 @@ func TestCustom(t *testing.T) { for _, file := range files { t.Logf("Testing file %s", file) for tcase := range iterateExecFile(file) { - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err != nil { t.Fatalf("Got error: %v, parsing sql: %v", err.Error(), tcase.input) } @@ -193,10 +196,11 @@ func TestCustom(t *testing.T) { func TestStreamPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("stream_cases.txt") { var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = BuildStreaming(statement, testSchema) } @@ -253,11 +257,12 @@ func TestMessageStreamingPlan(t *testing.T) { func TestLockPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("lock_cases.txt") { t.Run(tcase.input, func(t *testing.T) { var plan *Plan var err error - statement, err := 
sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index 1e9dbf6ad12..977b3822050 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -339,7 +339,7 @@ } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal update @@ -355,7 +355,7 @@ options:PassthroughDMLs } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # cross-db update @@ -370,7 +370,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a.b set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # update unknown table @@ -385,7 +385,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # update unknown table @@ -401,7 +401,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # multi-table update @@ -420,7 +420,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a, b set a.`name` = 'foo' where a.id = b.id and b.var = 'test'", - "WhereClause": "where a.id = b.id and b.var = 'test'" + "WhereClause": " where a.id = b.id and b.var = 'test'" } # multi-table update @@ -440,7 +440,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a join b on a.id = b.id set a.`name` = 'foo' where b.var = 'test'", - "WhereClause": "where b.var = 'test'" + "WhereClause": " where b.var = 'test'" } @@ -499,7 +499,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal delete @@ -515,7 +515,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # delete unknown table @@ -563,7 +563,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete a, b from a, b where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index a82052a0578..26b89934628 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -359,7 +359,7 @@ func (qe *QueryEngine) Close() { var errNoCache = errors.New("plan should not be cached") func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := sqlparser.Parse(sql) + statement, err := qe.env.SQLParser().Parse(sql) if err != nil { return nil, err } @@ -402,7 +402,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } func (qe *QueryEngine) getStreamPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := sqlparser.Parse(sql) + statement, err := 
qe.env.SQLParser().Parse(sql) if err != nil { return nil, err } @@ -479,7 +479,7 @@ func (qe *QueryEngine) GetConnSetting(ctx context.Context, settings []string) (* cacheKey := SettingsCacheKey(buf.String()) connSetting, _, err := qe.settings.GetOrLoad(cacheKey, 0, func() (*smartconnpool.Setting, error) { // build the setting queries - query, resetQuery, err := planbuilder.BuildSettingQuery(settings) + query, resetQuery, err := planbuilder.BuildSettingQuery(settings, qe.env.SQLParser()) if err != nil { return nil, err } @@ -609,7 +609,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques response.Header().Set("Content-Type", "text/plain") qe.ForEachPlan(func(plan *TabletPlan) bool { - response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) + response.Write([]byte(fmt.Sprintf("%#v\n", qe.env.SQLParser().TruncateForUI(plan.Original)))) if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) } else { @@ -629,7 +629,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques var qstats []perQueryStats qe.ForEachPlan(func(plan *TabletPlan) bool { var pqstats perQueryStats - pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) + pqstats.Query = unicoded(qe.env.SQLParser().TruncateForUI(plan.Original)) pqstats.Table = plan.TableName().String() pqstats.Plan = plan.PlanID pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowsAffected, pqstats.RowsReturned, pqstats.ErrorCount = plan.Stats() @@ -697,7 +697,7 @@ func (qe *QueryEngine) handleHTTPConsolidations(response http.ResponseWriter, re for _, v := range items { var query string if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(v.Query) + query, _ = qe.env.SQLParser().RedactSQLQuery(v.Query) } else { query = v.Query } diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index b16539d67f8..f38d1a9e3cb 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -63,7 +63,7 @@ func TestStrictMode(t *testing.T) { // Test default behavior. 
config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.se.InitDBConfig(newDBConfigs(db).DbaWithDB()) @@ -356,7 +356,7 @@ func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfig config.OltpReadPool.IdleTimeout = idleTimeout config.OlapReadPool.IdleTimeout = idleTimeout config.TxPool.IdleTimeout = idleTimeout - env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) // the integration tests that check cache behavior do not expect a doorkeeper; disable it @@ -456,7 +456,7 @@ func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -514,7 +514,7 @@ func TestPlanCachePollution(t *testing.T) { config.DB = dbcfgs // config.LFUQueryCacheSizeBytes = 3 * 1024 * 1024 - env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -830,7 +830,7 @@ func TestAddQueryStats(t *testing.T) { config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(fakesqldb.New(t)) config.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics - env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name, collations.MySQL8()) + env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name, collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.tabletType, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) @@ -869,7 +869,7 @@ func TestPlanPoolUnsafe(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - statement, err := sqlparser.Parse(tcase.query) + statement, err := sqlparser.NewTestParser().Parse(tcase.query) require.NoError(t, err) plan, err := planbuilder.Build(statement, map[string]*schema.Table{}, "dbName", false, collations.MySQL8()) // Plan building will not fail, but it will mark that reserved connection is needed. 
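A minimal sketch (not part of the patch) of the wiring pattern the hunks above rely on: the tabletenv.Env is now constructed with an explicit *sqlparser.Parser, and everything downstream parses through env.SQLParser() instead of the removed package-level sqlparser.Parse. The exporter name and query string below are made up for illustration.

package main

import (
	"fmt"

	"vitess.io/vitess/go/mysql/collations"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
)

func main() {
	// Build an Env with an explicitly injected parser, mirroring the test
	// setups above; NewTestParser returns a parser with default options.
	config := tabletenv.NewDefaultConfig()
	env := tabletenv.NewEnv(config, "ExampleEnv", collations.MySQL8(), sqlparser.NewTestParser())

	// Parse through the injected parser rather than a global function.
	stmt, err := env.SQLParser().Parse("select 1 from dual")
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed statement type: %T\n", stmt)
}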
diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 342755dde1d..e586a7f2d64 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -757,7 +757,7 @@ func (qre *QueryExecutor) verifyRowCount(count, maxrows int64) error { if warnThreshold > 0 && count > warnThreshold { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) qre.tsv.Stats().Warnings.Add("ResultsExceeded", 1) - log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true)) + log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true, qre.tsv.SQLParser())) } return nil } @@ -1146,7 +1146,7 @@ func (qre *QueryExecutor) GetSchemaDefinitions(tableType querypb.SchemaTableType } func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchViewQuery(viewNames) + query, err := eschema.GetFetchViewQuery(viewNames, qre.tsv.SQLParser()) if err != nil { return err } @@ -1154,7 +1154,7 @@ func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(s } func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableQuery(tableNames) + query, err := eschema.GetFetchTableQuery(tableNames, qre.tsv.SQLParser()) if err != nil { return err } @@ -1162,7 +1162,7 @@ func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func } func (qre *QueryExecutor) getAllDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableAndViewsQuery(tableNames) + query, err := eschema.GetFetchTableAndViewsQuery(tableNames, qre.tsv.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 9d83df3ffae..05888a8b77d 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -28,6 +28,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" @@ -1488,7 +1490,7 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } dbconfigs := newDBConfigs(db) config.DB = dbconfigs - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} err := tsv.StartService(target, dbconfigs, nil /* mysqld */) if config.TwoPCEnable { @@ -1566,7 +1568,7 @@ func initQueryExecutorTestDB(db *fakesqldb.DB) { "varchar|int64"), "Innodb_rows_read|0", )) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) } func 
getTestTableFields() []*querypb.Field { @@ -1659,7 +1661,7 @@ func addQueryExecutorSupportedQueries(db *fakesqldb.DB) { fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) for query, result := range queryResultMap { db.AddQuery(query, result) } diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go index efe63ab0a8e..a41f23b6aa0 100644 --- a/go/vt/vttablet/tabletserver/query_list.go +++ b/go/vt/vttablet/tabletserver/query_list.go @@ -57,13 +57,16 @@ type QueryList struct { // so have to maintain a list to compare with the actual connection. // and remove appropriately. queryDetails map[int64][]*QueryDetail + + parser *sqlparser.Parser } // NewQueryList creates a new QueryList -func NewQueryList(name string) *QueryList { +func NewQueryList(name string, parser *sqlparser.Parser) *QueryList { return &QueryList{ name: name, queryDetails: make(map[int64][]*QueryDetail), + parser: parser, } } @@ -150,7 +153,7 @@ func (ql *QueryList) AppendQueryzRows(rows []QueryDetailzRow) []QueryDetailzRow for _, qd := range qds { query := qd.conn.Current() if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(query) + query, _ = ql.parser.RedactSQLQuery(query) } row := QueryDetailzRow{ Type: ql.name, diff --git a/go/vt/vttablet/tabletserver/query_list_test.go b/go/vt/vttablet/tabletserver/query_list_test.go index 02b24d86cda..57b672a16e0 100644 --- a/go/vt/vttablet/tabletserver/query_list_test.go +++ b/go/vt/vttablet/tabletserver/query_list_test.go @@ -22,6 +22,8 @@ import ( "time" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testConn struct { @@ -44,7 +46,7 @@ func (tc *testConn) IsKilled() bool { } func TestQueryList(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd := NewQueryDetail(context.Background(), &testConn{id: connID}) ql.Add(qd) @@ -69,7 +71,7 @@ func TestQueryList(t *testing.T) { } func TestQueryListChangeConnIDInMiddle(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd1 := NewQueryDetail(context.Background(), &testConn{id: connID}) ql.Add(qd1) diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go index 41a40a0720c..8f42192c330 100644 --- a/go/vt/vttablet/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -59,7 +59,7 @@ var ( querylogzFuncMap = template.FuncMap{ "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, "cssWrappable": logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, + "truncateQuery": sqlparser.NewTestParser().TruncateForUI, "unquote": func(s string) string { return strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 151f028ca09..f56402a1fdb 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" ) @@ -157,7 +156,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { return true } Value := &queryzRow{ - 
Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(qe.env.SQLParser().TruncateForUI(plan.Original)), Table: plan.TableName().String(), Plan: plan.PlanID, } diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go index d9f302062c7..b46f5545bde 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go @@ -21,15 +21,15 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -146,7 +146,7 @@ func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader { dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") config.DB = dbc - tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest", collations.MySQL8())) + tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest", collations.MySQL8(), sqlparser.NewTestParser())) tr.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go index 58a577c7445..b405fb4131d 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go @@ -24,12 +24,12 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -43,7 +43,7 @@ func TestReplTracker(t *testing.T) { params := db.ConnParams() cp := *params config.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") - env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go index fade8918114..ade70c6f669 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go @@ -24,11 +24,11 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -74,7 +74,7 @@ func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter { cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") - tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest", collations.MySQL8()), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) + tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest", collations.MySQL8(), sqlparser.NewTestParser()), 
&topodatapb.TabletAlias{Cell: "test", Uid: 1111}) tw.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/schema/db.go b/go/vt/vttablet/tabletserver/schema/db.go index 5699ffc1bde..4bea80c4010 100644 --- a/go/vt/vttablet/tabletserver/schema/db.go +++ b/go/vt/vttablet/tabletserver/schema/db.go @@ -89,7 +89,7 @@ where table_schema = database() and table_name in ::viewNames` ) // reloadTablesDataInDB reloads teh tables information we have stored in our database we use for schema-tracking. -func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string) error { +func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string, parser *sqlparser.Parser) error { // No need to do anything if we have no tables to refresh or drop. if len(tables) == 0 && len(droppedTables) == 0 { return nil @@ -117,7 +117,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // Generate the queries to delete and insert table data. - clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable) + clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable, parser) if err != nil { return err } @@ -126,7 +126,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta return err } - insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables) + insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables, parser) if err != nil { return err } @@ -162,8 +162,8 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // generateFullQuery generates the full query from the query as a string. -func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { - stmt, err := sqlparser.Parse( +func generateFullQuery(query string, parser *sqlparser.Parser) (*sqlparser.ParsedQuery, error) { + stmt, err := parser.Parse( sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), sidecar.GetIdentifier()).Query) if err != nil { return nil, err @@ -174,7 +174,7 @@ func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { } // reloadViewsDataInDB reloads teh views information we have stored in our database we use for schema-tracking. -func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string) error { +func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string, parser *sqlparser.Parser) error { // No need to do anything if we have no views to refresh or drop. if len(views) == 0 && len(droppedViews) == 0 { return nil @@ -213,7 +213,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return nil }, func() *sqltypes.Result { return &sqltypes.Result{} }, - 1000, + 1000, parser, ) if err != nil { return err @@ -221,7 +221,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // Generate the queries to delete and insert view data. 
- clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable) + clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable, parser) if err != nil { return err } @@ -230,7 +230,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return err } - insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews) + insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews, parser) if err != nil { return err } @@ -266,8 +266,8 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // getViewDefinition gets the viewDefinition for the given views. -func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int) error { - viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions) +func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int, parser *sqlparser.Parser) error { + viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions, parser) if err != nil { return err } @@ -358,7 +358,7 @@ func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.Co } // reloadDataInDB reloads the schema tracking data in the database -func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, created []*Table, dropped []*Table) error { +func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, created []*Table, dropped []*Table, parser *sqlparser.Parser) error { // tablesToReload and viewsToReload stores the tables and views that need reloading and storing in our MySQL database. var tablesToReload, viewsToReload []*Table // droppedTables, droppedViews stores the list of tables and views we need to delete, respectively. @@ -382,19 +382,19 @@ func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, } } - if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables); err != nil { + if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables, parser); err != nil { return err } - if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews); err != nil { + if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews, parser); err != nil { return err } return nil } // GetFetchViewQuery gets the fetch query to run for getting the listed views. If no views are provided, then all the views are fetched. -func GetFetchViewQuery(viewNames []string) (string, error) { +func GetFetchViewQuery(viewNames []string, parser *sqlparser.Parser) (string, error) { if len(viewNames) == 0 { - parsedQuery, err := generateFullQuery(fetchViews) + parsedQuery, err := generateFullQuery(fetchViews, parser) if err != nil { return "", err } @@ -407,7 +407,7 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} - parsedQuery, err := generateFullQuery(fetchUpdatedViews) + parsedQuery, err := generateFullQuery(fetchUpdatedViews, parser) if err != nil { return "", err } @@ -415,9 +415,9 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } // GetFetchTableQuery gets the fetch query to run for getting the listed tables. If no tables are provided, then all the tables are fetched. 
-func GetFetchTableQuery(tableNames []string) (string, error) { +func GetFetchTableQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTables) + parsedQuery, err := generateFullQuery(fetchTables, parser) if err != nil { return "", err } @@ -430,7 +430,7 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTables) + parsedQuery, err := generateFullQuery(fetchUpdatedTables, parser) if err != nil { return "", err } @@ -438,9 +438,9 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } // GetFetchTableAndViewsQuery gets the fetch query to run for getting the listed tables and views. If no table names are provided, then all the tables and views are fetched. -func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { +func GetFetchTableAndViewsQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTablesAndViews) + parsedQuery, err := generateFullQuery(fetchTablesAndViews, parser) if err != nil { return "", err } @@ -453,7 +453,7 @@ func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews) + parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews, parser) if err != nil { return "", err } diff --git a/go/vt/vttablet/tabletserver/schema/db_test.go b/go/vt/vttablet/tabletserver/schema/db_test.go index 88f91b3c99a..742e2521854 100644 --- a/go/vt/vttablet/tabletserver/schema/db_test.go +++ b/go/vt/vttablet/tabletserver/schema/db_test.go @@ -26,12 +26,14 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var ( @@ -81,7 +83,7 @@ func TestGenerateFullQuery(t *testing.T) { tt.wantQuery = tt.query } - got, err := generateFullQuery(tt.query) + got, err := generateFullQuery(tt.query, sqlparser.NewTestParser()) if tt.wantErr != "" { require.EqualError(t, err, tt.wantErr) return @@ -96,7 +98,8 @@ func TestGenerateFullQuery(t *testing.T) { func TestGetCreateStatement(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetCreateStatement", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success view @@ -131,7 +134,8 @@ func TestGetCreateStatement(t *testing.T) { func TestGetChangedViewNames(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetChangedViewNames", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success @@ -164,7 +168,8 @@ func TestGetChangedViewNames(t *testing.T) { 
func TestGetViewDefinition(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetViewDefinition", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) viewsBV, err := sqltypes.BuildBindVariable([]string{"v1", "lead"}) @@ -209,7 +214,7 @@ func collectGetViewDefinitions(conn *connpool.Conn, bv map[string]*querypb.BindV return nil }, func() *sqltypes.Result { return &sqltypes.Result{} - }, 1000) + }, 1000, sqlparser.NewTestParser()) return viewDefinitions, err } @@ -336,7 +341,8 @@ func TestGetMismatchedTableNames(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tc.dbError != "" { @@ -456,7 +462,8 @@ func TestReloadTablesInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -467,7 +474,7 @@ func TestReloadTablesInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete) + err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -588,7 +595,8 @@ func TestReloadViewsInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -599,7 +607,7 @@ func TestReloadViewsInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete) + err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -878,7 +886,8 @@ func TestReloadDataInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. 
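As a usage sketch (again not part of the patch), the exported fetch-query helpers in the tabletserver schema package now take the parser as an explicit argument, as exercised by the tests in the surrounding hunks; the view name "v1" is a placeholder chosen for the example.

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
)

func main() {
	parser := sqlparser.NewTestParser()

	// With no names supplied, the helper returns the query that fetches all views.
	allViews, err := schema.GetFetchViewQuery(nil, parser)
	if err != nil {
		panic(err)
	}

	// With names supplied, the names are bound into the generated query.
	someViews, err := schema.GetFetchViewQuery([]string{"v1"}, parser)
	if err != nil {
		panic(err)
	}

	fmt.Println(allViews)
	fmt.Println(someViews)
}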
@@ -889,7 +898,7 @@ func TestReloadDataInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped) + err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -920,7 +929,7 @@ func TestGetFetchViewQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchViewQuery(testcase.viewNames) + query, err := GetFetchViewQuery(testcase.viewNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ -947,7 +956,7 @@ func TestGetFetchTableQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableQuery(testcase.tableNames) + query, err := GetFetchTableQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ -974,7 +983,7 @@ func TestGetFetchTableAndViewsQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableAndViewsQuery(testcase.tableNames) + query, err := GetFetchTableAndViewsQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index a55066a65ec..62a1e9afa2b 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -162,7 +162,7 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti } return conn.ExecuteFetch(query, maxRows, true) } - if err := sidecardb.Init(ctx, exec); err != nil { + if err := sidecardb.Init(ctx, exec, se.env.SQLParser()); err != nil { log.Errorf("Error in sidecardb.Init: %+v", err) if se.env.Config().DB.HasGlobalSettings() { log.Warning("Ignoring sidecardb.Init error for unmanaged tablets") @@ -536,7 +536,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { if shouldUseDatabase { // If reloadDataInDB succeeds, then we don't want to prevent sending the broadcast notification. // So, we do this step in the end when we can receive no more errors that fail the reload operation. 
- err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped) + err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped, se.env.SQLParser()) if err != nil { log.Errorf("error in updating schema information in Engine.reload() - %v", err) } @@ -828,7 +828,7 @@ func NewEngineForTests() *Engine { isOpen: true, tables: make(map[string]*Table), historian: newHistorian(false, 0, nil), - env: tabletenv.NewEnv(tabletenv.NewDefaultConfig(), "SchemaEngineForTests", collations.MySQL8()), + env: tabletenv.NewEnv(tabletenv.NewDefaultConfig(), "SchemaEngineForTests", collations.MySQL8(), sqlparser.NewTestParser()), } return se } @@ -848,6 +848,10 @@ func (se *Engine) CollationEnv() *collations.Environment { return se.env.CollationEnv() } +func (se *Engine) SQLParser() *sqlparser.Parser { + return se.env.SQLParser() +} + func extractNamesFromTablesList(tables []*Table) []string { var tableNames []string for _, table := range tables { diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 2dcdeea23cd..0e9f0dd9162 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -580,7 +580,7 @@ func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAge config.OlapReadPool.IdleTimeout = idleTimeout config.TxPool.IdleTimeout = idleTimeout config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds - se := NewEngine(tabletenv.NewEnv(config, "SchemaTest", collations.MySQL8())) + se := NewEngine(tabletenv.NewEnv(config, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser())) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) return se } @@ -764,7 +764,8 @@ func TestEngineMysqlTime(t *testing.T) { t.Run(tt.name, func(t *testing.T) { se := &Engine{} db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tt.timeStampErr != nil { @@ -870,7 +871,8 @@ func TestEnginePopulatePrimaryKeys(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} @@ -931,7 +933,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} se.innoDbReadRowsCounter = stats.NewCounter("TestEngineUpdateInnoDBRowsRead-"+tt.name, "") @@ -958,7 +961,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { // TestEngineGetTableData tests the functionality of getTableData function func TestEngineGetTableData(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := 
tabletenv.NewEnv(nil, "TestEngineGetTableData", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) tests := []struct { @@ -1132,7 +1136,8 @@ func TestEngineReload(t *testing.T) { cfg := tabletenv.NewDefaultConfig() cfg.DB = newDBConfigs(db) cfg.SignalWhenSchemaChange = true - conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil) + env := tabletenv.NewEnv(nil, "TestEngineReload", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := newEngine(10*time.Second, 10*time.Second, 0, db) diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index b3da8cf18ab..5ae79193b36 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -235,7 +235,7 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl Size: 2, IdleTimeout: 10 * time.Second, } - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8()), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser()), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index 684bb6d317d..58019c4c018 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -134,12 +134,12 @@ func (tr *Tracker) process(ctx context.Context) { gtid = event.Gtid } if event.Type == binlogdatapb.VEventType_DDL && - MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName()) { + MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName(), tr.env.SQLParser()) { if err := tr.schemaUpdated(gtid, event.Statement, event.Timestamp); err != nil { tr.env.Stats().ErrorCounters.Add(vtrpcpb.Code_INTERNAL.String(), 1) log.Errorf("Error updating schema: %s for ddl %s, gtid %s", - sqlparser.TruncateForLog(err.Error()), event.Statement, gtid) + tr.env.SQLParser().TruncateForLog(err.Error()), event.Statement, gtid) } } } @@ -248,8 +248,8 @@ func encodeString(in string) string { } // MustReloadSchemaOnDDL returns true if the ddl is for the db which is part of the workflow and is not an online ddl artifact -func MustReloadSchemaOnDDL(sql string, dbname string) bool { - ast, err := sqlparser.Parse(sql) +func MustReloadSchemaOnDDL(sql string, dbname string, parser *sqlparser.Parser) bool { + ast, err := parser.Parse(sql) if err != nil { return false } diff --git a/go/vt/vttablet/tabletserver/schema/tracker_test.go b/go/vt/vttablet/tabletserver/schema/tracker_test.go index 017bb941af0..8b6f1458283 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker_test.go +++ b/go/vt/vttablet/tabletserver/schema/tracker_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) @@ -78,7 +79,7 @@ func TestTracker(t *testing.T) { } config := se.env.Config() config.TrackSchemaVersions = true - env := 
tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) initial := env.Stats().ErrorCounters.Counts()["INTERNAL"] tracker := NewTracker(env, vs, se) tracker.Open() @@ -122,7 +123,7 @@ func TestTrackerShouldNotInsertInitialSchema(t *testing.T) { } config := se.env.Config() config.TrackSchemaVersions = true - env := tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) tracker := NewTracker(env, vs, se) tracker.Open() <-vs.done @@ -170,7 +171,7 @@ func TestMustReloadSchemaOnDDL(t *testing.T) { } for _, tc := range testcases { t.Run("", func(t *testing.T) { - require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname)) + require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname, sqlparser.NewTestParser())) }) } } diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index adf53c1d2b1..4b88ce734d7 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -28,6 +28,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/dbconfigs" @@ -397,6 +399,10 @@ func (k *killableConn) Kill(message string, elapsed time.Duration) error { return nil } +func (k *killableConn) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + func TestStateManagerShutdownGracePeriod(t *testing.T) { sm := newTestStateManager(t) defer sm.StopService() @@ -704,11 +710,12 @@ func verifySubcomponent(t *testing.T, order int64, component any, state testStat func newTestStateManager(t *testing.T) *stateManager { order.Store(0) config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, "StateManagerTest", collations.MySQL8()) + env := tabletenv.NewEnv(config, "StateManagerTest", collations.MySQL8(), sqlparser.NewTestParser()) + parser := sqlparser.NewTestParser() sm := &stateManager{ - statelessql: NewQueryList("stateless"), - statefulql: NewQueryList("stateful"), - olapql: NewQueryList("olap"), + statelessql: NewQueryList("stateless", parser), + statefulql: NewQueryList("stateful", parser), + olapql: NewQueryList("olap", parser), hs: newHealthStreamer(env, &topodatapb.TabletAlias{}, schema.NewEngine(env)), se: &testSchemaEngine{}, rt: &testReplTracker{lag: 1 * time.Second}, diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index 739ed5c4295..067f2194655 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -184,11 +185,11 @@ func (sc *StatefulConnection) Renew() error { } // String returns a printable version of the connection info. 
-func (sc *StatefulConnection) String(sanitize bool) string { +func (sc *StatefulConnection) String(sanitize bool, parser *sqlparser.Parser) string { return fmt.Sprintf( "%v\t%s", sc.ConnID, - sc.txProps.String(sanitize), + sc.txProps.String(sanitize, parser), ) } diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool.go b/go/vt/vttablet/tabletserver/stateful_connection_pool.go index ce6f917610e..a28d153dca1 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool.go @@ -93,7 +93,7 @@ func (sf *StatefulConnectionPool) Close() { if conn.IsInTransaction() { thing = "transaction" } - log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages)) + log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages, sf.env.SQLParser())) sf.env.Stats().InternalErrors.Add("StrayTransactions", 1) conn.Close() conn.Releasef("pool closed") diff --git a/go/vt/vttablet/tabletserver/tabletenv/env.go b/go/vt/vttablet/tabletserver/tabletenv/env.go index 2d624dfa19b..8d53ad5d09c 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/env.go +++ b/go/vt/vttablet/tabletserver/tabletenv/env.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" ) // Env defines the functions supported by TabletServer @@ -32,6 +33,7 @@ type Env interface { Config() *TabletConfig Exporter() *servenv.Exporter Stats() *Stats + SQLParser() *sqlparser.Parser LogError() CollationEnv() *collations.Environment } @@ -41,17 +43,19 @@ type testEnv struct { exporter *servenv.Exporter stats *Stats collationEnv *collations.Environment + parser *sqlparser.Parser } // NewEnv creates an Env that can be used for tabletserver subcomponents // without an actual TabletServer. -func NewEnv(config *TabletConfig, exporterName string, collationEnv *collations.Environment) Env { +func NewEnv(config *TabletConfig, exporterName string, collationEnv *collations.Environment, parser *sqlparser.Parser) Env { exporter := servenv.NewExporter(exporterName, "Tablet") return &testEnv{ config: config, exporter: exporter, stats: NewStats(exporter), collationEnv: collationEnv, + parser: parser, } } @@ -60,6 +64,7 @@ func (te *testEnv) Config() *TabletConfig { return te.config } func (te *testEnv) Exporter() *servenv.Exporter { return te.exporter } func (te *testEnv) Stats() *Stats { return te.stats } func (te *testEnv) CollationEnv() *collations.Environment { return te.collationEnv } +func (te *testEnv) SQLParser() *sqlparser.Parser { return te.parser } func (te *testEnv) LogError() { if x := recover(); x != nil { diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 965c11ec366..af7ba01519c 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -131,6 +131,11 @@ type TabletServer struct { checkMysqlGaugeFunc *stats.GaugeFunc collationEnv *collations.Environment + parser *sqlparser.Parser +} + +func (tsv *TabletServer) SQLParser() *sqlparser.Parser { + return tsv.parser } var _ queryservice.QueryService = (*TabletServer)(nil) @@ -141,8 +146,8 @@ var _ queryservice.QueryService = (*TabletServer)(nil) var RegisterFunctions []func(Controller) // NewServer creates a new TabletServer based on the command line flags. 
-func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment) *TabletServer { - return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias, collationEnv) +func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) *TabletServer { + return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias, collationEnv, parser) } var ( @@ -152,7 +157,7 @@ var ( // NewTabletServer creates an instance of TabletServer. Only the first // instance of TabletServer will expose its state variables. -func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment) *TabletServer { +func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) *TabletServer { exporter := servenv.NewExporter(name, "Tablet") tsv := &TabletServer{ exporter: exporter, @@ -164,6 +169,7 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC topoServer: topoServer, alias: alias.CloneVT(), collationEnv: collationEnv, + parser: parser, } tsv.QueryTimeout.Store(config.Oltp.QueryTimeout.Nanoseconds()) @@ -176,9 +182,9 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC return tsv.sm.Target().TabletType } - tsv.statelessql = NewQueryList("oltp-stateless") - tsv.statefulql = NewQueryList("oltp-stateful") - tsv.olapql = NewQueryList("olap") + tsv.statelessql = NewQueryList("oltp-stateless", parser) + tsv.statefulql = NewQueryList("oltp-stateful", parser) + tsv.olapql = NewQueryList("olap", parser) tsv.se = schema.NewEngine(tsv) tsv.hs = newHealthStreamer(tsv, alias, tsv.se) tsv.rt = repltracker.NewReplTracker(tsv, alias) @@ -1612,13 +1618,13 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( // not a concern. var messagef, logMessage, query, truncatedQuery string messagef = fmt.Sprintf("Uncaught panic for %%v:\n%v\n%s", x, tb.Stack(4) /* Skip the last 4 boiler-plate frames. 
*/) - query = queryAsString(sql, bindVariables, tsv.TerseErrors, false) + query = queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.SQLParser()) terr := vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%s", fmt.Sprintf(messagef, query)) if tsv.TerseErrors == tsv.Config().SanitizeLogMessages { - truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true, tsv.SQLParser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } else { - truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } log.Error(logMessage) @@ -1678,20 +1684,20 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin sqlState := sqlErr.SQLState() errnum := sqlErr.Number() if tsv.TerseErrors && errCode != vtrpcpb.Code_FAILED_PRECONDITION { - err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false)) + err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.SQLParser())) if logMethod != nil { - message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } else { - err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, false, false)) + err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, false, false, tsv.SQLParser())) if logMethod != nil { - message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } } else { err = vterrors.Errorf(errCode, "%v%s", err.Error(), callerID) if logMethod != nil { - message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } @@ -2123,7 +2129,7 @@ func (tsv *TabletServer) ConsolidatorMode() string { // If sanitize is false it also includes the bind variables. // If truncateForLog is true, it truncates the sql query and the // bind variables. -func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool) string { +func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool, parser *sqlparser.Parser) string { // Add the bind vars unless this needs to be sanitized, e.g. 
for log messages bvBuf := &bytes.Buffer{} fmt.Fprintf(bvBuf, "BindVars: {") @@ -2147,7 +2153,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the bind vars if necessary bv := bvBuf.String() - maxLen := sqlparser.GetTruncateErrLen() + maxLen := parser.GetTruncateErrLen() if truncateForLog && maxLen > 0 && len(bv) > maxLen { if maxLen <= 12 { bv = sqlparser.TruncationText @@ -2158,7 +2164,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the sql query if necessary if truncateForLog { - sql = sqlparser.TruncateForLog(sql) + sql = parser.TruncateForLog(sql) } // sql is the normalized query without the bind vars diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index aa8565291be..4a275cd6253 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -31,6 +31,7 @@ import ( "time" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/dbconfigs" vttestpb "vitess.io/vitess/go/vt/proto/vttest" @@ -447,7 +448,7 @@ func TestTabletServerBeginFail(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TxPool.Size = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -955,7 +956,7 @@ func TestSerializeTransactionsSameRow(t *testing.T) { config.HotRowProtection.MaxConcurrency = 1 // Reduce the txpool to 2 because we should never consume more than two slots. config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1062,7 +1063,7 @@ func TestDMLQueryWithoutWhereClause(t *testing.T) { config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1090,7 +1091,7 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { config.HotRowProtection.MaxConcurrency = 2 // Reduce the txpool to 2 because we should never consume more than two slots. 
config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1226,7 +1227,7 @@ func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1309,7 +1310,7 @@ func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { config := tabletenv.NewDefaultConfig() config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1563,7 +1564,7 @@ func TestHandleExecUnknownError(t *testing.T) { defer cancel() logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") } @@ -1576,23 +1577,25 @@ func TestQueryAsString(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3333333333), "bv4": sqltypes.Int64BindVariable(4444444444), } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) - query := queryAsString(longSql, longBv, true, true) + query := queryAsString(longSql, longBv, true, true, parser) want := "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, true, false) + query = queryAsString(longSql, longBv, true, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, true) + query = queryAsString(longSql, longBv, false, true, parser) want = "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, false) + query = queryAsString(longSql, longBv, false, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" assert.Equal(t, want, query) } @@ -1683,7 +1686,7 @@ func TestHandleExecTabletError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := 
NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1708,7 +1711,7 @@ func TestTerseErrors(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = false - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1742,7 +1745,7 @@ func TestSanitizeLogMessages(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1775,7 +1778,7 @@ func TestTerseErrorsNonSQLError(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1800,7 +1803,7 @@ func TestSanitizeLogMessagesNonSQLError(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1825,7 +1828,7 @@ func TestSanitizeMessagesBindVars(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1856,7 +1859,7 @@ func TestSanitizeMessagesNoBindVars(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "sensitive message"), nil) @@ -1874,7 +1877,7 @@ func TestTruncateErrorLen(t *testing.T) { defer cancel() config := 
tabletenv.NewDefaultConfig() config.TruncateErrorLen = 32 - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1896,19 +1899,23 @@ func TestTruncateErrorLen(t *testing.T) { func TestTruncateMessages(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = false + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = false // Sanitize the log messages, which means that the bind vars are omitted - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + cfg.SanitizeLogMessages = true + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 52, + }) + require.NoError(t, err) + tsv := NewTabletServer(ctx, "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), parser) tl := newTestLogger() defer tl.Close() - sqlparser.SetTruncateErrLen(52) sql := "select * from test_table where xyz = :vtg1 order by abc desc" sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'" - err := tsv.convertAndLogError( + err = tsv.convertAndLogError( ctx, sql, map[string]*querypb.BindVariable{"vtg1": sqltypes.StringBindVariable("this is kinda long eh")}, @@ -1928,7 +1935,7 @@ func TestTruncateMessages(t *testing.T) { t.Errorf("log got '%s', want '%s'", tl.getLog(0), wantLog) } - sqlparser.SetTruncateErrLen(140) + parser.SetTruncateErrLen(140) err = tsv.convertAndLogError( ctx, sql, @@ -1948,7 +1955,6 @@ func TestTruncateMessages(t *testing.T) { if wantLog != tl.getLog(1) { t.Errorf("log got '%s', want '%s'", tl.getLog(1), wantLog) } - sqlparser.SetTruncateErrLen(0) } func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { @@ -1956,7 +1962,7 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "select * from test_table where id = :a", @@ -1998,7 +2004,7 @@ func TestACLHUP(t *testing.T) { defer cancel() tableacl.Register("simpleacl", &simpleacl.Factory{}) config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) f, err := os.CreateTemp("", "tableacl") require.NoError(t, err) @@ -2508,13 +2514,13 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T func setupTabletServerTest(t testing.TB, ctx context.Context, keyspaceName string) 
(*fakesqldb.DB, *TabletServer) { config := tabletenv.NewDefaultConfig() - return setupTabletServerTestCustom(t, ctx, config, keyspaceName) + return setupTabletServerTestCustom(t, ctx, config, keyspaceName, sqlparser.NewTestParser()) } -func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string) (*fakesqldb.DB, *TabletServer) { +func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string, parser *sqlparser.Parser) (*fakesqldb.DB, *TabletServer) { db := setupFakeDB(t) - sidecardb.AddSchemaInitQueries(db, true) - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8()) + sidecardb.AddSchemaInitQueries(db, true, parser) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), parser) require.Equal(t, StateNotConnected, tsv.sm.State()) dbcfgs := newDBConfigs(db) target := &querypb.Target{ @@ -2661,7 +2667,8 @@ func addTabletServerSupportedQueries(db *fakesqldb.DB) { "rollback": {}, fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + parser := sqlparser.NewTestParser() + sidecardb.AddSchemaInitQueries(db, true, parser) for query, result := range queryResultMap { db.AddQuery(query, result) } diff --git a/go/vt/vttablet/tabletserver/throttle/throttler_test.go b/go/vt/vttablet/tabletserver/throttle/throttler_test.go index 58821d29059..f0d895e1413 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler_test.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" - + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -110,7 +110,7 @@ func newTestThrottler() *Throttler { s.ThrottleThreshold = &atomic.Uint64{} s.ThrottleThreshold.Store(1) } - env := tabletenv.NewEnv(nil, "TabletServerTest", collations.MySQL8()) + env := tabletenv.NewEnv(nil, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) throttler := &Throttler{ mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), mysqlClusterThresholds: cache.New(cache.NoExpiration, 0), diff --git a/go/vt/vttablet/tabletserver/tx/api.go b/go/vt/vttablet/tabletserver/tx/api.go index a06923776c0..a392e530ffa 100644 --- a/go/vt/vttablet/tabletserver/tx/api.go +++ b/go/vt/vttablet/tabletserver/tx/api.go @@ -126,7 +126,7 @@ func (p *Properties) RecordQuery(query string) { func (p *Properties) InTransaction() bool { return p != nil } // String returns a printable version of the transaction -func (p *Properties) String(sanitize bool) string { +func (p *Properties) String(sanitize bool, parser *sqlparser.Parser) string { if p == nil { return "" } @@ -135,7 +135,7 @@ func (p *Properties) String(sanitize bool) string { sb := strings.Builder{} for _, query := range p.Queries { if sanitize { - query, _ = sqlparser.RedactSQLQuery(query) + query, _ = parser.RedactSQLQuery(query) } sb.WriteString(query) sb.WriteString(";") diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 4ed67dc2f66..d2ec33ef969 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -26,6 +26,7 @@ import ( "time" 
"vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" "github.com/stretchr/testify/assert" @@ -49,7 +50,7 @@ func TestTxEngineClose(t *testing.T) { config.TxPool.Size = 10 config.Oltp.TxTimeout = 100 * time.Millisecond config.GracePeriods.Shutdown = 0 - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) // Normal close. te.AcceptReadWrite() @@ -152,7 +153,7 @@ func TestTxEngineBegin(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) for _, exec := range []func() (int64, string, error){ func() (int64, string, error) { @@ -198,7 +199,7 @@ func TestTxEngineRenewFails(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) te.AcceptReadOnly() options := &querypb.ExecuteOptions{} connID, _, err := te.ReserveBegin(ctx, options, nil, nil) @@ -536,7 +537,7 @@ func setupTxEngine(db *fakesqldb.DB) *TxEngine { config.TxPool.Size = 10 config.Oltp.TxTimeout = 100 * time.Millisecond config.GracePeriods.Shutdown = 0 - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) return te } @@ -568,7 +569,7 @@ func TestTxEngineFailReserve(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) options := &querypb.ExecuteOptions{} _, err := te.Reserve(ctx, options, 0, nil) diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index f42e3c95408..f01d12b655c 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -130,7 +130,7 @@ func (tp *TxPool) Shutdown(ctx context.Context) { func (tp *TxPool) transactionKiller() { defer tp.env.LogError() for _, conn := range tp.scp.GetElapsedTimeout(vterrors.TxKillerRollback) { - log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages)) + log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages, tp.env.SQLParser())) switch { case conn.IsTainted(): conn.Close() diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 1d63e122f30..eae3153d874 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/dbconfigs" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" @@ -824,7 +825,7 @@ func newEnv(exporterName string) tabletenv.Env { config.OltpReadPool.IdleTimeout = 30 * time.Second config.OlapReadPool.IdleTimeout = 30 * time.Second config.TxPool.IdleTimeout = 30 * time.Second - env := tabletenv.NewEnv(config, exporterName, collations.MySQL8()) + env := tabletenv.NewEnv(config, exporterName, collations.MySQL8(), sqlparser.NewTestParser()) return env } diff --git a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go index 6257d38718a..ec9d9e1203a 100644 --- a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go +++ b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -48,7 +49,7 @@ func TestTxLimiter_DisabledAllowsAll(t *testing.T) { config.TransactionLimitByPrincipal = false config.TransactionLimitByComponent = false config.TransactionLimitBySubcomponent = false - limiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + limiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) im, ef := createCallers("", "", "", "") for i := 0; i < 5; i++ { if got, want := limiter.Get(im, ef), true; got != want { @@ -70,7 +71,7 @@ func TestTxLimiter_LimitsOnlyOffendingUser(t *testing.T) { config.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected type: got %T, want %T", newlimiter, limiter) @@ -136,7 +137,7 @@ func TestTxLimiterDryRun(t *testing.T) { config.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8())) + newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected type: got %T, want %T", newlimiter, limiter) diff --git a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go index 773e4f092c1..a2af61da963 100644 --- a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go +++ b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -48,7 +49,7 @@ func TestTxSerializer_NoHotRow(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -80,7 +81,7 @@ func 
TestTxSerializerRedactDebugUI(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -104,7 +105,7 @@ func TestTxSerializerRedactDebugUI(t *testing.T) { func TestKeySanitization(t *testing.T) { config := tabletenv.NewDefaultConfig() - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) // with a where clause key := "t1 where c1='foo'" want := "t1 ... [REDACTED]" @@ -126,7 +127,7 @@ func TestTxSerializer(t *testing.T) { config.HotRowProtection.MaxQueueSize = 2 config.HotRowProtection.MaxGlobalQueueSize = 3 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -199,7 +200,7 @@ func TestTxSerializer_ConcurrentTransactions(t *testing.T) { config.HotRowProtection.MaxQueueSize = 3 config.HotRowProtection.MaxGlobalQueueSize = 3 config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -322,7 +323,7 @@ func TestTxSerializerCancel(t *testing.T) { config.HotRowProtection.MaxQueueSize = 4 config.HotRowProtection.MaxGlobalQueueSize = 4 config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx3 and tx4 will record their number once they're done waiting. @@ -423,7 +424,7 @@ func TestTxSerializerDryRun(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 2 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -493,7 +494,7 @@ func TestTxSerializerGlobalQueueOverflow(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) // tx1. 
done1, waited1, err1 := txs.Wait(context.Background(), "t1 where1", "t1") @@ -534,7 +535,7 @@ func TestTxSerializerPending(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) if got, want := txs.Pending("t1 where1"), 0; got != want { t.Errorf("there should be no pending transaction: got = %v, want = %v", got, want) } @@ -545,7 +546,7 @@ func BenchmarkTxSerializer_NoHotRow(b *testing.B) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8())) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) b.ResetTimer() diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go index 1f1520fec26..3463e1ef65e 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go @@ -29,8 +29,8 @@ import ( "go.uber.org/mock/gomock" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -44,7 +44,7 @@ import ( func TestDisabledThrottler(t *testing.T) { config := tabletenv.NewDefaultConfig() config.EnableTxThrottler = false - env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) throttler := NewTxThrottler(env, nil) throttler.InitDBConfig(&querypb.Target{ Keyspace: "keyspace", @@ -108,7 +108,7 @@ func TestEnabledThrottler(t *testing.T) { config.EnableTxThrottler = true config.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA} - env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) throttler := NewTxThrottler(env, ts) throttlerImpl, _ := throttler.(*txThrottler) assert.NotNil(t, throttlerImpl) @@ -171,7 +171,7 @@ func TestFetchKnownCells(t *testing.T) { func TestDryRunThrottler(t *testing.T) { config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) testCases := []struct { Name string diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 2862601bf1b..d81ee7c2ce8 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -432,7 +432,7 @@ func (vse *Engine) setWatch() { } var vschema *vindexes.VSchema if v != nil { - vschema = vindexes.BuildVSchema(v) + vschema = vindexes.BuildVSchema(v, vse.env.SQLParser()) if err != nil { log.Errorf("Error building vschema: %v", err) vse.vschemaErrors.Add(1) diff --git a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go index 90387e97f2c..83369f27d5e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go +++ 
b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go @@ -24,6 +24,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -65,7 +66,7 @@ func Fuzz(data []byte) int { if err != nil { return -1 } - _, _ = buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{ + _, _ = buildPlan(t1, testLocalVSchema, sqlparser.NewTestParser(), &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{ {Match: str1, Filter: str2}, }, diff --git a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go index f514298e844..5d57effbadf 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -86,7 +87,7 @@ func TestFindColVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string @@ -149,7 +150,7 @@ func TestFindOrCreateVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) lvs := &localVSchema{ keyspace: "ks1", @@ -204,7 +205,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go index 9d978a6e5f2..af41f900d25 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" ) @@ -94,7 +95,7 @@ func customEngine(t *testing.T, modifier func(mysql.ConnParams) mysql.ConnParams config := env.TabletEnv.Config().Clone() config.DB = dbconfigs.NewTestDBConfigs(modified, modified, modified.DbName) - engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8()), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8(), sqlparser.NewTestParser()), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) engine.InitDBConfig(env.KeyspaceName, env.ShardName) engine.Open() return engine diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index b60c0bd6754..e89276a5c98 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -286,11 +286,11 @@ func mustSendStmt(query mysql.Query, dbname string) bool { return true } -func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter) bool { +func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter, parser *sqlparser.Parser) bool { if query.Database != "" && query.Database != 
dbname { return false } - ast, err := sqlparser.Parse(query.SQL) + ast, err := parser.Parse(query.SQL) // If there was a parsing error, we send it through. Hopefully, // recipient can handle it. if err != nil { @@ -346,7 +346,7 @@ func tableMatches(table sqlparser.TableName, dbname string, filter *binlogdatapb return ruleMatches(table.Name.String(), filter) } -func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter, collationEnv *collations.Environment) (*Plan, error) { +func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter, collationEnv *collations.Environment, parser *sqlparser.Parser) (*Plan, error) { for _, rule := range filter.Rules { switch { case strings.HasPrefix(rule.Match, "/"): @@ -360,7 +360,7 @@ func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter, co } return buildREPlan(ti, vschema, rule.Filter, collationEnv) case rule.Match == ti.Name: - return buildTablePlan(ti, vschema, rule.Filter, collationEnv) + return buildTablePlan(ti, vschema, rule.Filter, collationEnv, parser) } } return nil, nil @@ -412,8 +412,8 @@ func buildREPlan(ti *Table, vschema *localVSchema, filter string, collationEnv * // BuildTablePlan handles cases where a specific table name is specified. // The filter must be a select statement. -func buildTablePlan(ti *Table, vschema *localVSchema, query string, collationEnv *collations.Environment) (*Plan, error) { - sel, fromTable, err := analyzeSelect(query) +func buildTablePlan(ti *Table, vschema *localVSchema, query string, collationEnv *collations.Environment, parser *sqlparser.Parser) (*Plan, error) { + sel, fromTable, err := analyzeSelect(query, parser) if err != nil { log.Errorf("%s", err.Error()) return nil, err @@ -443,8 +443,8 @@ func buildTablePlan(ti *Table, vschema *localVSchema, query string, collationEnv return plan, nil } -func analyzeSelect(query string) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelect(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { + statement, err := parser.Parse(query) if err != nil { return nil, fromTable, err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go index 174c3b793f0..b6b62098060 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -20,18 +20,16 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -86,7 +84,7 @@ func init() { "ks": &kspb, }, } - vschema := vindexes.BuildVSchema(srvVSchema) + vschema := vindexes.BuildVSchema(srvVSchema, sqlparser.NewTestParser()) testLocalVSchema = &localVSchema{ keyspace: "ks", vschema: vschema, @@ -167,7 +165,7 @@ func TestMustSendDDL(t *testing.T) { }} for _, tcase := range testcases { q := mysql.Query{SQL: tcase.sql, Database: tcase.db} - got := 
mustSendDDL(q, "mydb", filter) + got := mustSendDDL(q, "mydb", filter, sqlparser.NewTestParser()) if got != tcase.output { t.Errorf("%v: %v, want %v", q, got, tcase.output) } @@ -647,7 +645,7 @@ func TestPlanBuilder(t *testing.T) { t.Run(tcase.inRule.String(), func(t *testing.T) { plan, err := buildPlan(tcase.inTable, testLocalVSchema, &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{tcase.inRule}, - }, collations.MySQL8()) + }, collations.MySQL8(), sqlparser.NewTestParser()) if tcase.outErr != "" { assert.Nil(t, plan) @@ -744,7 +742,7 @@ func TestPlanBuilderFilterComparison(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { plan, err := buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{Match: "t1", Filter: tcase.inFilter}}, - }, collations.MySQL8()) + }, collations.MySQL8(), sqlparser.NewTestParser()) if tcase.outErr != "" { assert.Nil(t, plan) diff --git a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go index 91f319fa2c5..88084d62a50 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go @@ -62,7 +62,7 @@ func (rs *resultStreamer) Cancel() { } func (rs *resultStreamer) Stream() error { - _, fromTable, err := analyzeSelect(rs.query) + _, fromTable, err := analyzeSelect(rs.query, rs.vse.env.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index a0ab7b4d533..395e152dfb0 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -139,7 +139,7 @@ func (rs *rowStreamer) Stream() error { func (rs *rowStreamer) buildPlan() error { // This pre-parsing is required to extract the table name // and create its metadata. - sel, fromTable, err := analyzeSelect(rs.query) + sel, fromTable, err := analyzeSelect(rs.query, rs.se.SQLParser()) if err != nil { return err } @@ -176,7 +176,7 @@ func (rs *rowStreamer) buildPlan() error { // This is because the row format of a read is identical // to the row format of a binlog event. So, the same // filtering will work. 
- rs.plan, err = buildTablePlan(ti, rs.vschema, rs.query, rs.se.CollationEnv()) + rs.plan, err = buildTablePlan(ti, rs.vschema, rs.query, rs.se.CollationEnv(), rs.se.SQLParser()) if err != nil { log.Errorf("%s", err.Error()) return err diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index b3899c5f25d..4a793407008 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -109,7 +110,7 @@ func Init(ctx context.Context) (*Env, error) { te.Dbcfgs = dbconfigs.NewTestDBConfigs(te.cluster.MySQLConnParams(), te.cluster.MySQLAppDebugConnParams(), te.cluster.DbName()) config := tabletenv.NewDefaultConfig() config.DB = te.Dbcfgs - te.TabletEnv = tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8()) + te.TabletEnv = tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8(), sqlparser.NewTestParser()) te.Mysqld = mysqlctl.NewMysqld(te.Dbcfgs) pos, _ := te.Mysqld.PrimaryPosition() if strings.HasPrefix(strings.ToLower(pos.GTIDSet.Flavor()), string(mysqlctl.FlavorMariaDB)) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index 4816a042e78..2ea26c3632d 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -503,7 +503,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_COMMIT, }) case sqlparser.StmtDDL: - if mustSendDDL(q, vs.cp.DBName(), vs.filter) { + if mustSendDDL(q, vs.cp.DBName(), vs.filter, vs.vse.env.SQLParser()) { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, Gtid: replication.EncodePosition(vs.pos), @@ -520,7 +520,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_OTHER, }) } - if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName()) { + if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName(), vs.vse.env.SQLParser()) { vs.se.ReloadAt(context.Background(), vs.pos) } case sqlparser.StmtSavepoint: @@ -738,7 +738,7 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap Name: tm.Name, Fields: cols, } - plan, err := buildPlan(table, vs.vschema, vs.filter, vs.se.CollationEnv()) + plan, err := buildPlan(table, vs.vschema, vs.filter, vs.se.CollationEnv(), vs.se.SQLParser()) if err != nil { return nil, err } @@ -764,7 +764,7 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, error) { var fields []*querypb.Field for i, typ := range tm.Types { - t, err := sqltypes.MySQLToType(int64(typ), 0) + t, err := sqltypes.MySQLToType(typ, 0) if err != nil { return nil, fmt.Errorf("unsupported type: %d, position: %d", typ, i) } @@ -957,7 +957,7 @@ func (vs *vstreamer) rebuildPlans() error { // cause that to change. 
continue } - newPlan, err := buildPlan(plan.Table, vs.vschema, vs.filter, vs.se.CollationEnv()) + newPlan, err := buildPlan(plan.Table, vs.vschema, vs.filter, vs.se.CollationEnv(), vs.se.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 6134e68f71c..8d75dcebe44 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -35,6 +35,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" @@ -554,7 +556,7 @@ func (db *LocalCluster) createVTSchema() error { return db.ExecuteFetch(query, "") } - if err := sidecardb.Init(context.Background(), sidecardbExec); err != nil { + if err := sidecardb.Init(context.Background(), sidecardbExec, sqlparser.NewTestParser()); err != nil { return err } return nil diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go index 952738ba8b5..ebaef4305c4 100644 --- a/go/vt/wrangler/external_cluster_test.go +++ b/go/vt/wrangler/external_cluster_test.go @@ -10,6 +10,7 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" ) @@ -18,7 +19,7 @@ func TestVitessCluster(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") tmc := newTestWranglerTMClient() - wr := New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" t.Run("Zero clusters to start", func(t *testing.T) { diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 03fad81d7b8..7fce5ce9afc 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -162,7 +162,7 @@ func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resul } func (dc *fakeDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index 7eec43f0fd1..6f23d437460 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -23,30 +23,28 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/collations" - vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/grpctmserver" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager" + vdiff2 
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/vttablet/tabletservermock" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - // import the gRPC client implementation for tablet manager _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" @@ -191,7 +189,6 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { ft.Tablet.PortMap["vt"] = vtPort ft.Tablet.PortMap["grpc"] = gRPCPort ft.Tablet.Hostname = "127.0.0.1" - config := &tabletenv.TabletConfig{} // Create a test tm on that port, and re-read the record // (it has new ports and IP). ft.TM = &tabletmanager.TabletManager{ @@ -200,8 +197,9 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { MysqlDaemon: ft.FakeMysqlDaemon, DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), - VDiffEngine: vdiff2.NewEngine(config, wr.TopoServer(), ft.Tablet, collations.MySQL8()), + VDiffEngine: vdiff2.NewEngine(wr.TopoServer(), ft.Tablet, collations.MySQL8(), sqlparser.NewTestParser()), CollationEnv: collations.MySQL8(), + SQLParser: sqlparser.NewTestParser(), } if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatal(err) diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 13f430919fd..9d39eec969d 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -445,7 +445,7 @@ func (wr *Wrangler) checkIfPreviousJournalExists(ctx context.Context, mz *materi mu sync.Mutex exists bool tablets []string - ws = workflow.NewServer(wr.ts, wr.tmc) + ws = workflow.NewServer(wr.ts, wr.tmc, wr.parser) ) err := forAllSources(func(si *topo.ShardInfo) error { @@ -540,7 +540,7 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp return nil, nil, nil, fmt.Errorf("vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = wr.parser.ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace == "" { return nil, nil, nil, fmt.Errorf("vindex table name must be in the form .
. Got: %v", vindex.Params["table"]) } @@ -837,7 +837,7 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s return fmt.Errorf("vindex %s not found in vschema", qualifiedVindexName) } - targetKeyspace, targetTableName, err := sqlparser.ParseTable(sourceVindex.Params["table"]) + targetKeyspace, targetTableName, err := wr.parser.ParseTable(sourceVindex.Params["table"]) if err != nil || targetKeyspace == "" { return fmt.Errorf("vindex table name must be in the form <keyspace>.<table>
. Got: %v", sourceVindex.Params["table"]) } @@ -1064,7 +1064,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater if err != nil { return nil, err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, wr.parser) if err != nil { return nil, err } @@ -1220,7 +1220,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. - sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.wr.parser.TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -1236,7 +1236,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.wr.parser) if err != nil { return err } @@ -1245,7 +1245,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.wr.parser) if err != nil { return err } @@ -1266,7 +1266,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { // We use schemadiff to normalize the schema. // For now, and because this is could have wider implications, we ignore any errors in // reading the source schema. - schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + schema, err := schemadiff.NewSchemaFromQueries(applyDDLs, mz.wr.parser) if err != nil { log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) } else { @@ -1291,9 +1291,8 @@ func (mz *materializer) deploySchema(ctx context.Context) error { }) } -func stripTableForeignKeys(ddl string) (string, error) { - - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1321,8 +1320,8 @@ func stripTableForeignKeys(ddl string) (string, error) { return newDDL, nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1368,7 +1367,7 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } // Validate non-empty query. 
- stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.wr.parser.Parse(ts.SourceExpression) if err != nil { return "", err } diff --git a/go/vt/wrangler/materializer_env_test.go b/go/vt/wrangler/materializer_env_test.go index d01e9c7ab32..6c209ad21f6 100644 --- a/go/vt/wrangler/materializer_env_test.go +++ b/go/vt/wrangler/materializer_env_test.go @@ -130,7 +130,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + parser := sqlparser.NewTestParser() + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), parser) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -146,7 +147,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := parser.TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 7d46be48397..5dd5929adb9 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -31,10 +31,10 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -1543,7 +1543,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { defer cancel() topoServ := memorytopo.NewServer(ctx, "cell") - wr := New(logutil.NewConsoleLogger(), topoServ, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), topoServ, nil, collations.MySQL8(), sqlparser.NewTestParser()) unique := map[string]*vschemapb.Vindex{ "v": { @@ -2543,7 +2543,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) defer env.close() tabletID := 100 @@ -2872,7 +2872,7 @@ func TestStripForeignKeys(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -2946,7 +2946,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := stripTableConstraints(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index dbad6b2ee29..d23f3f016f8 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -60,7 +60,7 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string ev := &events.Reparent{} // do the work - err = grpcvtctldserver.NewVtctldServer(wr.ts).InitShardPrimaryLocked(ctx, ev, 
&vtctldatapb.InitShardPrimaryRequest{ + err = grpcvtctldserver.NewVtctldServer(wr.ts, wr.parser).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ Keyspace: keyspace, Shard: shard, PrimaryElectTabletAlias: primaryElectTabletAlias, diff --git a/go/vt/wrangler/resharder_env_test.go b/go/vt/wrangler/resharder_env_test.go index 867f8670bc7..911a0be6d9c 100644 --- a/go/vt/wrangler/resharder_env_test.go +++ b/go/vt/wrangler/resharder_env_test.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -94,7 +95,7 @@ func newTestResharderEnv(t *testing.T, ctx context.Context, sources, targets []s cell: "cell", tmc: newTestResharderTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) initTopo(t, env.topoServ, "ks", sources, targets, []string{"cell"}) tabletID := 100 for _, shard := range sources { diff --git a/go/vt/wrangler/tablet_test.go b/go/vt/wrangler/tablet_test.go index 9f2c869fa0a..6df20535d71 100644 --- a/go/vt/wrangler/tablet_test.go +++ b/go/vt/wrangler/tablet_test.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" ) @@ -37,7 +38,7 @@ func TestInitTabletShardConversion(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -71,7 +72,7 @@ func TestDeleteTabletBasic(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -103,7 +104,7 @@ func TestDeleteTabletTruePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -150,7 +151,7 @@ func TestDeleteTabletFalsePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet1 := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -202,7 +203,7 @@ func TestDeleteTabletShardNonExisting(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index c2ef5d8e675..b188b5343d5 100644 --- 
a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -37,6 +37,7 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -91,7 +92,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -343,7 +344,7 @@ func TestBackupRestoreLagged(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -562,7 +563,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -737,7 +738,7 @@ func TestDisableActiveReparents(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index ebee27b1c0e..f45bb8dba1e 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -56,7 +57,7 @@ func copySchema(t *testing.T, useShardAsSource bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index dbd7a9f978a..b4c9dc4c8a7 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -31,6 
+31,7 @@ import ( "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" @@ -50,7 +51,7 @@ func TestEmergencyReparentShard(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -204,7 +205,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create a primary, a couple good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index 4ebaa69bb4d..f5f1b157406 100644 --- a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -22,12 +22,12 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/discovery" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -51,7 +51,7 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -144,7 +144,7 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -227,7 +227,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), 
collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -320,7 +320,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -406,7 +406,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, and a good replica. oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -510,7 +510,7 @@ func TestRPCTabletExternallyReparentedDemotesPrimaryToConfiguredTabletType(t *te ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary and a new primary oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_SPARE, nil) diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index 56f1596dca9..af5ebad06b1 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/grpctmserver" @@ -211,6 +212,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { QueryServiceControl: tabletservermock.NewController(), VREngine: vreplication.NewTestEngine(wr.TopoServer(), ft.Tablet.Alias.Cell, ft.FakeMysqlDaemon, binlogplayer.NewFakeDBClient, binlogplayer.NewFakeDBClient, topoproto.TabletDbName(ft.Tablet), nil), CollationEnv: collations.MySQL8(), + SQLParser: sqlparser.NewTestParser(), } if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatalf("Error in tablet - %v, err - %v", topoproto.TabletAliasString(ft.Tablet.Alias), err.Error()) diff --git a/go/vt/wrangler/testlib/find_tablet_test.go b/go/vt/wrangler/testlib/find_tablet_test.go index 41e308a98ee..d6c142d9030 100644 --- a/go/vt/wrangler/testlib/find_tablet_test.go +++ b/go/vt/wrangler/testlib/find_tablet_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" 
"vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -37,7 +38,7 @@ func TestFindTablet(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, two good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go index 55f436e9c24..37913da6fd2 100644 --- a/go/vt/wrangler/testlib/permissions_test.go +++ b/go/vt/wrangler/testlib/permissions_test.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/sqltypes" @@ -48,7 +49,7 @@ func TestPermissions(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index c38de353966..39bde50b3e9 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -52,7 +53,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -168,7 +169,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -304,7 +305,7 @@ func TestPlannedReparentInitialization(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer 
vp.Close() @@ -390,7 +391,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -498,7 +499,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -604,7 +605,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -684,7 +685,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -769,7 +770,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -909,7 +910,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index c7c5f06e4ac..55a7e3b225b 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -49,7 +50,7 @@ func TestShardReplicationStatuses(t *testing.T) { 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0"); err != nil { @@ -133,7 +134,7 @@ func TestReparentTablet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0"); err != nil { @@ -190,7 +191,7 @@ func TestSetReplicationSource(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0") diff --git a/go/vt/wrangler/testlib/shard_test.go b/go/vt/wrangler/testlib/shard_test.go index d5af9599d7a..244c7a1fa44 100644 --- a/go/vt/wrangler/testlib/shard_test.go +++ b/go/vt/wrangler/testlib/shard_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" @@ -36,7 +37,7 @@ func TestDeleteShardCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go index fb6cdbc0209..3ba291c9f33 100644 --- a/go/vt/wrangler/testlib/version_test.go +++ b/go/vt/wrangler/testlib/version_test.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/logutil" @@ -71,7 +72,7 @@ func TestVersion(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/vtctl_pipe.go b/go/vt/wrangler/testlib/vtctl_pipe.go index 3ec62ffdc82..8eef9ed5fe8 100644 --- a/go/vt/wrangler/testlib/vtctl_pipe.go +++ b/go/vt/wrangler/testlib/vtctl_pipe.go @@ -33,6 
+33,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctlserver" "vitess.io/vitess/go/vt/vtctl/vtctlclient" @@ -77,7 +78,7 @@ func NewVtctlPipe(t *testing.T, ts *topo.Server) *VtctlPipe { // Create a gRPC server and listen on the port server := grpc.NewServer() - grpcvtctlserver.StartServer(server, ts, collations.MySQL8()) + grpcvtctlserver.StartServer(server, ts, collations.MySQL8(), sqlparser.NewTestParser()) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index cb9e9193fc1..c8a47960f3d 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -224,7 +224,7 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl return nil, nil, err } - ws := workflow.NewServer(wr.ts, wr.tmc) + ws := workflow.NewServer(wr.ts, wr.tmc, wr.parser) state := &workflow.State{ Workflow: workflowName, SourceKeyspace: ts.SourceKeyspaceName(), @@ -556,7 +556,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa } if !journalsExist { ts.Logger().Infof("No previous journals were found. Proceeding normally.") - sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel) + sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel, wr.parser) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -956,7 +956,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if err != nil { return nil, err } - ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, wr.parser) if err != nil { return nil, err } @@ -1150,7 +1150,7 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, // If so, it also returns the list of sourceWorkflows that need to be switched. 
func (ts *trafficSwitcher) checkJournals(ctx context.Context) (journalsExist bool, sourceWorkflows []string, err error) { var ( - ws = workflow.NewServer(ts.TopoServer(), ts.TabletManagerClient()) + ws = workflow.NewServer(ts.TopoServer(), ts.TabletManagerClient(), ts.wr.parser) mu sync.Mutex ) diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index d0ca09d6624..128719a8225 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -119,7 +119,7 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, targetShards []string, fmtQuery string) *testMigraterEnv { tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = sourceShards tme.targetShards = targetShards @@ -383,7 +383,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar require.Greater(t, len(shards), 1, "shard by shard migrations can only be done on sharded keyspaces") tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = shards tme.targetShards = shards @@ -539,7 +539,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targetShards []string) *testShardMigraterEnv { tme := &testShardMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) tme.sourceShards = sourceShards tme.targetShards = targetShards tme.tmeDB = fakesqldb.New(t) diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 5e4e064e238..35b1e72a459 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -112,6 +112,7 @@ type vdiff struct { targetTimeZone string collationEnv *collations.Environment + parser *sqlparser.Parser } // compareColInfo contains the metadata for a column of the table being diffed @@ -146,6 +147,7 @@ type tableDiffer struct { targetPrimitive engine.Primitive collationEnv *collations.Environment + parser *sqlparser.Parser } // shardStreamer streams rows from one shard. 
This works for @@ -223,6 +225,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou sourceTimeZone: ts.sourceTimeZone, targetTimeZone: ts.targetTimeZone, collationEnv: wr.collationEnv, + parser: wr.parser, } for shard, source := range ts.Sources() { df.sources[shard] = &shardStreamer{ @@ -490,8 +493,8 @@ func (df *vdiff) buildVDiffPlan(ctx context.Context, filter *binlogdatapb.Filter // findPKs identifies PKs, determines any collations to be used for // them, and removes them from the columns used for data comparison. -func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer, collationEnv *collations.Environment) (sqlparser.OrderBy, error) { - columnCollations, err := getColumnCollations(table, collationEnv) +func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer, collationEnv *collations.Environment, parser *sqlparser.Parser) (sqlparser.OrderBy, error) { + columnCollations, err := getColumnCollations(table, collationEnv, parser) if err != nil { return nil, err } @@ -535,8 +538,8 @@ func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser // getColumnCollations determines the proper collation to use for each // column in the table definition leveraging MySQL's collation inheritance // rules. -func getColumnCollations(table *tabletmanagerdatapb.TableDefinition, collationEnv *collations.Environment) (map[string]collations.ID, error) { - createstmt, err := sqlparser.Parse(table.Schema) +func getColumnCollations(table *tabletmanagerdatapb.TableDefinition, collationEnv *collations.Environment, parser *sqlparser.Parser) (map[string]collations.ID, error) { + createstmt, err := parser.Parse(table.Schema) if err != nil { return nil, err } @@ -650,7 +653,7 @@ func getColumnNameForSelectExpr(selectExpression sqlparser.SelectExpr) (string, // buildTablePlan builds one tableDiffer. 
func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, query string) (*tableDiffer, error) { - statement, err := sqlparser.Parse(query) + statement, err := df.parser.Parse(query) if err != nil { return nil, err } @@ -661,6 +664,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer td := &tableDiffer{ targetTable: table.Name, collationEnv: df.collationEnv, + parser: df.parser, } sourceSelect := &sqlparser.Select{} targetSelect := &sqlparser.Select{} @@ -740,7 +744,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer }, } - orderby, err := findPKs(table, targetSelect, td, df.collationEnv) + orderby, err := findPKs(table, targetSelect, td, df.collationEnv, df.parser) if err != nil { return nil, err } @@ -1329,7 +1333,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]sqltypes.Value) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.parser.Parse(queryStmt) if err != nil { return nil, err } diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go index 5f1c3f5072e..cf76318339a 100644 --- a/go/vt/wrangler/vdiff_env_test.go +++ b/go/vt/wrangler/vdiff_env_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -78,7 +79,7 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) // Generate a unique dialer name. 
dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) diff --git a/go/vt/wrangler/vdiff_test.go b/go/vt/wrangler/vdiff_test.go index 1d25f00c830..cae4f6afca1 100644 --- a/go/vt/wrangler/vdiff_test.go +++ b/go/vt/wrangler/vdiff_test.go @@ -1100,7 +1100,7 @@ func TestVDiffFindPKs(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - _, err := findPKs(tc.table, tc.targetSelect, tc.tdIn, collations.MySQL8()) + _, err := findPKs(tc.table, tc.targetSelect, tc.tdIn, collations.MySQL8(), sqlparser.NewTestParser()) require.NoError(t, err) require.EqualValues(t, tc.tdOut, tc.tdIn) }) @@ -1258,7 +1258,7 @@ func TestGetColumnCollations(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := getColumnCollations(tt.table, collationEnv) + got, err := getColumnCollations(tt.table, collationEnv, sqlparser.NewTestParser()) if (err != nil) != tt.wantErr { t.Errorf("getColumnCollations() error = %v, wantErr = %t", err, tt.wantErr) return diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index c705ad3f6f1..41f1eaa9396 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -275,7 +275,7 @@ func (vx *vexec) execCallback(callback func(context.Context, *topo.TabletInfo) ( // parseQuery parses the input query func (vx *vexec) parseQuery() (err error) { - if vx.stmt, err = sqlparser.Parse(vx.query); err != nil { + if vx.stmt, err = vx.wr.parser.Parse(vx.query); err != nil { return err } if vx.tableName, err = extractTableName(vx.stmt); err != nil { @@ -835,7 +835,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ where = " where state <> 'Stopped'" } query := "select distinct workflow from _vt.vreplication" + where - vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc) + vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc, wr.parser) results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err diff --git a/go/vt/wrangler/vexec_plan.go b/go/vt/wrangler/vexec_plan.go index 5b68d9ada5f..6178844c398 100644 --- a/go/vt/wrangler/vexec_plan.go +++ b/go/vt/wrangler/vexec_plan.go @@ -259,7 +259,7 @@ func (vx *vexec) buildUpdatePlan(ctx context.Context, planner vexecPlanner, upd } } if templates := plannerParams.updateTemplates; len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.parser.QueryMatchesTemplates(vx.query, templates) if err != nil { return nil, err } @@ -311,7 +311,7 @@ func (vx *vexec) buildInsertPlan(ctx context.Context, planner vexecPlanner, ins return nil, fmt.Errorf("query not supported by vexec: %s", sqlparser.String(ins)) } if len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.parser.QueryMatchesTemplates(vx.query, templates) if err != nil { return nil, err } diff --git a/go/vt/wrangler/vexec_test.go b/go/vt/wrangler/vexec_test.go index 10289740144..254d42ee49e 100644 --- a/go/vt/wrangler/vexec_test.go +++ b/go/vt/wrangler/vexec_test.go @@ -34,6 +34,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" ) func TestVExec(t *testing.T) { @@ -45,7 +46,7 @@ func TestVExec(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, time.Now().Unix()) defer env.close() var logger = logutil.NewMemoryLogger() - wr := 
New(logger, env.topoServ, env.tmc, collations.MySQL8()) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) vx := newVExec(ctx, workflow, keyspace, query, wr) err := vx.getPrimaries() @@ -190,7 +191,7 @@ func TestWorkflowListStreams(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc, collations.MySQL8()) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) _, err := wr.WorkflowAction(ctx, workflow, keyspace, "listall", false, nil) require.NoError(t, err) @@ -366,7 +367,7 @@ func TestWorkflowListAll(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc, collations.MySQL8()) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) workflows, err := wr.ListAllWorkflows(ctx, keyspace, true) require.Nil(t, err) @@ -387,7 +388,7 @@ func TestVExecValidations(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() - wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) vx := newVExec(ctx, workflow, keyspace, query, wr) @@ -473,7 +474,7 @@ func TestWorkflowUpdate(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc, collations.MySQL8()) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) nullSlice := textutil.SimulatedNullStringSlice // Used to represent a non-provided value nullOnDDL := binlogdatapb.OnDDLAction(textutil.SimulatedNullInt) // Used to represent a non-provided value tests := []struct { diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index 59ab836b1f8..26332b58bd9 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -60,17 +61,19 @@ type Wrangler struct { sem *semaphore.Weighted collationEnv *collations.Environment + parser *sqlparser.Parser } // New creates a new Wrangler object. 
-func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient, collationEnv *collations.Environment) *Wrangler { +func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient, collationEnv *collations.Environment, parser *sqlparser.Parser) *Wrangler { return &Wrangler{ logger: logger, ts: ts, tmc: tmc, - vtctld: grpcvtctldserver.NewVtctldServer(ts), + vtctld: grpcvtctldserver.NewVtctldServer(ts, parser), sourceTs: ts, collationEnv: collationEnv, + parser: parser, } } @@ -84,6 +87,7 @@ func NewTestWrangler(logger logutil.Logger, ts *topo.Server, tmc tmclient.Tablet vtctld: grpcvtctldserver.NewTestVtctldServer(ts, tmc), sourceTs: ts, collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } } @@ -114,3 +118,8 @@ func (wr *Wrangler) SetLogger(logger logutil.Logger) { func (wr *Wrangler) Logger() logutil.Logger { return wr.logger } + +// SQLParser returns the parser this wrangler is using. +func (wr *Wrangler) SQLParser() *sqlparser.Parser { + return wr.parser +} diff --git a/go/vt/wrangler/wrangler_env_test.go b/go/vt/wrangler/wrangler_env_test.go index 46846784918..c62a1d1bf50 100644 --- a/go/vt/wrangler/wrangler_env_test.go +++ b/go/vt/wrangler/wrangler_env_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -69,7 +70,7 @@ func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetS tabletType: topodatapb.TabletType_REPLICA, tmc: newTestWranglerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8()) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) env.tmc.tablets = make(map[int]*testWranglerTablet) // Generate a unique dialer name.
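Below is a hedged, illustrative sketch (not part of the patch) of the call shape this refactor establishes: `wrangler.New` now takes an explicit `*sqlparser.Parser` in addition to the collation environment, tests supply `sqlparser.NewTestParser()` as shown throughout the hunks above, and the injected parser is reachable via the new `SQLParser()` accessor instead of a package-level `sqlparser.Parse`. The query string is borrowed from `ListAllWorkflows` in this diff; everything else is a minimal standalone program assumed to compile against the patched tree.

```go
package main

import (
	"context"
	"fmt"

	"vitess.io/vitess/go/mysql/collations"
	"vitess.io/vitess/go/vt/logutil"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/topo/memorytopo"
	"vitess.io/vitess/go/vt/vttablet/tmclient"
	"vitess.io/vitess/go/vt/wrangler"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// In-memory topo server, as used by the wrangler tests in this patch.
	ts := memorytopo.NewServer(ctx, "cell1")

	// Tests in this patch use sqlparser.NewTestParser(); production callers
	// would construct a parser from their own configuration instead.
	parser := sqlparser.NewTestParser()

	// New signature: collation environment and parser are injected explicitly.
	wr := wrangler.New(
		logutil.NewConsoleLogger(),
		ts,
		tmclient.NewTabletManagerClient(),
		collations.MySQL8(),
		parser,
	)

	// The injected parser is exposed through the new accessor, so helpers such
	// as vexec can parse queries without reaching for a global default parser.
	stmt, err := wr.SQLParser().Parse("select distinct workflow from _vt.vreplication")
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlparser.String(stmt))
}
```

The same pattern recurs across the diff: `grpcvtctldserver.NewVtctldServer`, `workflow.NewServer`, `vtctldvexec.NewVExec`, and the vdiff plumbing all receive the parser from the wrangler rather than calling `sqlparser.Parse` directly.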